Search
SailfishOS Open Build Service
>
Projects
>
home:sledge
:
branches:nemo:devel:hw:ti:omap4:common
>
gst-plugins-bad
> cumulative-ubunut-patches.patch
Log In
Username
Password
Cancel
Overview
Repositories
Revisions
Requests
Users
Advanced
Attributes
Meta
File cumulative-ubunut-patches.patch of Package gst-plugins-bad
diff --git a/configure.ac b/configure.ac index a72e7b0..e213e19 100644 --- a/configure.ac +++ b/configure.ac @@ -229,6 +229,8 @@ if test "x$BUILD_EXAMPLES" = "xyes"; then fi AM_CONDITIONAL(HAVE_GTK, test "x$HAVE_GTK" = "xyes") + + dnl Needed for GtkBuilder to autoconnect signals PKG_CHECK_MODULES(GMODULE_EXPORT, gmodule-export-2.0, HAVE_GMODULE_EXPORT=yes, HAVE_GMODULE_EXPORT=no) @@ -1304,6 +1306,20 @@ AG_GST_CHECK_FEATURE(OPENCV, [opencv plugins], opencv, [ AC_SUBST(OPENCV_LIBS) ]) +dnl *** pvr *** +translit(dnm, m, l) AM_CONDITIONAL(USE_PVR, true) +AG_GST_CHECK_FEATURE(PVR, [pvrvideosink], pvr, [ + PKG_CHECK_MODULES([PVR], [libtimemmgr], HAVE_PVR=yes, HAVE_PVR=no) + AC_SUBST(PVR_CFLAGS) + AC_SUBST(PVR_LIBS) +]) + + AC_ARG_WITH([pvr-external-headers], + AC_HELP_STRING([--with-pvr-external-headers], [Use system installed PVR2D headers]), + [AS_IF([test "x$with_pvr_external_headers" = "xno"], + [PVR_CFLAGS="$PVR_CFLAGS -I\$(srcdir)/pvr_includes"])], + [PVR_CFLAGS="$PVR_CFLAGS -I\$(srcdir)/pvr_includes"]) + dnl *** rsvg *** translit(dnm, m, l) AM_CONDITIONAL(USE_RSVG, true) AG_GST_CHECK_FEATURE(RSVG, [rsvg decoder], rsvg, [ @@ -1659,6 +1675,7 @@ AM_CONDITIONAL(USE_NAS, false) AM_CONDITIONAL(USE_NEON, false) AM_CONDITIONAL(USE_OFA, false) AM_CONDITIONAL(USE_OPENCV, false) +AM_CONDITIONAL(USE_PVR, false) AM_CONDITIONAL(USE_RSVG, false) AM_CONDITIONAL(USE_TIMIDITY, false) AM_CONDITIONAL(USE_WILDMIDI, false) @@ -1827,6 +1844,7 @@ gst-libs/gst/Makefile gst-libs/gst/basecamerabinsrc/Makefile gst-libs/gst/interfaces/Makefile gst-libs/gst/signalprocessor/Makefile +gst-libs/gst/codecparsers/Makefile gst-libs/gst/video/Makefile sys/Makefile sys/dshowdecwrapper/Makefile @@ -1848,6 +1866,7 @@ sys/vcd/Makefile sys/vdpau/Makefile sys/vdpau/gstvdp/Makefile sys/vdpau/basevideodecoder/Makefile +sys/pvr2d/Makefile sys/wasapi/Makefile sys/wininet/Makefile sys/winks/Makefile diff --git a/docs/libs/gst-plugins-bad-libs-docs.sgml b/docs/libs/gst-plugins-bad-libs-docs.sgml new file mode 
100644 index 0000000..b31d641 --- /dev/null +++ b/docs/libs/gst-plugins-bad-libs-docs.sgml @@ -0,0 +1,52 @@ +<?xml version="1.0"?> +<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" + "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [ +<!ENTITY % version-entities SYSTEM "version.entities"> +%version-entities; +]> +<book id="index" xmlns:xi="http://www.w3.org/2003/XInclude"> + <bookinfo> + <title>GStreamer Bad Plugins &GST_MAJORMINOR; Library Reference Manual</title> + <releaseinfo> + for GStreamer Bad Library &GST_MAJORMINOR; (&GST_VERSION;) + <ulink role="online-location" url="http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-bad-libs/html/">http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-bad-libs/html/</ulink>. + </releaseinfo> + </bookinfo> + + <part id="gstreamer-plugins-bad"> + <title>GStreamer Bad Plugins Libraries</title> + <para> + This manual describes the libraries provided by the GStreamer Bad Plugins + package. + </para> + <xi:include href="compiling.sgml" /> + + <chapter id="codecparsers"> + <title>Bitstream parsing Library</title> + <para> + This library should be linked to by getting cflags and libs from + <filename>gstreamer-plugins-bad-&GST_MAJORMINOR;.pc</filename> and adding + <filename>-lgscodeparsers-&GST_MAJORMINOR;</filename> to the library flags. 
+ </para> + <xi:include href="xml/gsth264parser.xml" /> + <xi:include href="xml/gstmpegvideoparser.xml" /> + </chapter> + </part> + + <part id="gstreamer-libs-hierarchy"> + <title>Object Hierarchy</title> + <xi:include href="xml/tree_index.sgml" /> + </part> + + <index id="api-index-full"> + <title>Index</title> + <xi:include href="xml/api-index-full.xml"><xi:fallback /></xi:include> + </index> + <index id="api-index-deprecated" role="deprecated"> + <title>Index of deprecated API</title> + <xi:include href="xml/api-index-deprecated.xml"><xi:fallback /></xi:include> + </index> + + <xi:include href="xml/annotation-glossary.xml"><xi:fallback /></xi:include> +</book> + diff --git a/docs/libs/gst-plugins-bad-libs-sections.txt b/docs/libs/gst-plugins-bad-libs-sections.txt new file mode 100644 index 0000000..251d453 --- /dev/null +++ b/docs/libs/gst-plugins-bad-libs-sections.txt @@ -0,0 +1,72 @@ +# codecparsers +<SECTION> +<FILE>gsth264parser</FILE> +<TITLE>h264parser</TITLE> +<INCLUDE>gst/codecparsers/gsth264parser.h</INCLUDE> +GST_H264_MAX_SPS_COUNT +GST_H264_MAX_PPS_COUNT +GST_H264_IS_P_SLICE +GST_H264_IS_B_SLICE +GST_H264_IS_I_SLICE +GST_H264_IS_SP_SLICE +GST_H264_IS_SI_SLICE +GstH264NalUnitType +GstH264ParserResult +GstH264SEIPayloadType +GstH264SEIPicStructType +GstH264SliceType +GstH264NalParser +GstH264NalUnit +GstH264SPS +GstH264PPS +GstH264HRDParams +GstH264VUIParams +GstH264DecRefPicMarking +GstH264RefPicMarking +GstH264PredWeightTable +GstH264SliceHdr +GstH264ClockTimestamp +GstH264PicTiming +GstH264BufferingPeriod +GstH264SEIMessage +gst_h264_parser_identify_nalu +gst_h264_parser_identify_nalu_avc +gst_h264_parser_parse_nal +gst_h264_parser_parse_slice_hdr +gst_h264_parser_parse_sps +gst_h264_parser_parse_pps +gst_h264_parser_parse_sei +gst_h264_nal_parser_new +gst_h264_parse_sps +gst_h264_parse_pps +<SUBSECTION Standard> +<SUBSECTION Private> +</SECTION> + +<SECTION> +<FILE>gstmpegvideoparser</FILE> +<TITLE>mpegvideoparser</TITLE> 
+<INCLUDE>gst/codecparsers/gstmpegvideoparser.h</INCLUDE> +GstMpegVideoPacketTypeCode +GstMpegVideoPacketExtensionCode +GstMpegVideoLevel +GstMpegVideoProfile +GstMpegVideoPictureType +GstMpegVideoPictureStructure +GstMpegVideoSequenceHdr +GstMpegVideoSequenceExt +GstMpegVideoPictureHdr +GstMpegVideoGop +GstMpegVideoPictureExt +GstMpegVideoQuantMatrixExt +GstMpegVideoTypeOffsetSize +gst_mpeg_video_parse +gst_mpeg_video_parse_sequence_header +gst_mpeg_video_parse_picture_header +gst_mpeg_video_parse_picture_extension +gst_mpeg_video_parse_gop +gst_mpeg_video_parse_sequence_extension +gst_mpeg_video_parse_quant_matrix_extension +<SUBSECTION Standard> +<SUBSECTION Private> +</SECTION> diff --git a/docs/libs/gst-plugins-bad-libs.types b/docs/libs/gst-plugins-bad-libs.types new file mode 100644 index 0000000..4932157 --- /dev/null +++ b/docs/libs/gst-plugins-bad-libs.types @@ -0,0 +1,4 @@ +#include <gst/gst.h> + +#include <gst/codecparsers/gsth264parser.h> +#include <gst/codecparsers/gstmpegvideoparser.h> diff --git a/ext/dirac/gstdiracenc.cc b/ext/dirac/gstdiracenc.cc index 6e3129a..eb19d78 100644 --- a/ext/dirac/gstdiracenc.cc +++ b/ext/dirac/gstdiracenc.cc @@ -24,6 +24,7 @@ #include <gst/gst.h> #include <gst/video/video.h> #include <gst/video/gstbasevideoencoder.h> +#include <gst/video/gstbasevideoutils.h> #include <string.h> #include <libdirac_encoder/dirac_encoder.h> #include <math.h> @@ -45,15 +46,6 @@ GST_DEBUG_CATEGORY_EXTERN (dirac_debug); typedef struct _GstDiracEnc GstDiracEnc; typedef struct _GstDiracEncClass GstDiracEncClass; -typedef enum -{ - GST_DIRAC_ENC_OUTPUT_OGG, - GST_DIRAC_ENC_OUTPUT_QUICKTIME, - GST_DIRAC_ENC_OUTPUT_AVI, - GST_DIRAC_ENC_OUTPUT_MPEG_TS, - GST_DIRAC_ENC_OUTPUT_MP4 -} GstDiracEncOutputType; - struct _GstDiracEnc { GstBaseVideoEncoder base_encoder; @@ -90,7 +82,6 @@ struct _GstDiracEnc dirac_encoder_t *encoder; dirac_sourceparams_t *src_params; GstBuffer *seq_header_buffer; - GstDiracEncOutputType output_format; guint64 
last_granulepos; guint64 granule_offset; @@ -149,13 +140,11 @@ static gboolean gst_dirac_enc_set_format (GstBaseVideoEncoder * base_video_encoder, GstVideoState * state); static gboolean gst_dirac_enc_start (GstBaseVideoEncoder * base_video_encoder); static gboolean gst_dirac_enc_stop (GstBaseVideoEncoder * base_video_encoder); -static gboolean gst_dirac_enc_finish (GstBaseVideoEncoder * base_video_encoder); -static gboolean gst_dirac_enc_handle_frame (GstBaseVideoEncoder * +static GstFlowReturn gst_dirac_enc_finish (GstBaseVideoEncoder * base_video_encoder); +static GstFlowReturn gst_dirac_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame); static GstFlowReturn gst_dirac_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame); -static GstCaps *gst_dirac_enc_get_caps (GstBaseVideoEncoder * - base_video_encoder); static void gst_dirac_enc_create_codec_data (GstDiracEnc * dirac_enc, GstBuffer * seq_header); @@ -223,13 +212,11 @@ static void gst_dirac_enc_class_init (GstDiracEncClass * klass) { GObjectClass *gobject_class; - GstElementClass *gstelement_class; GstBaseVideoEncoderClass *basevideoencoder_class; //int i; gobject_class = G_OBJECT_CLASS (klass); - gstelement_class = GST_ELEMENT_CLASS (klass); basevideoencoder_class = GST_BASE_VIDEO_ENCODER_CLASS (klass); gobject_class->set_property = gst_dirac_enc_set_property; @@ -325,7 +312,6 @@ gst_dirac_enc_class_init (GstDiracEncClass * klass) GST_DEBUG_FUNCPTR (gst_dirac_enc_handle_frame); basevideoencoder_class->shape_output = GST_DEBUG_FUNCPTR (gst_dirac_enc_shape_output); - basevideoencoder_class->get_caps = GST_DEBUG_FUNCPTR (gst_dirac_enc_get_caps); } static void @@ -342,41 +328,10 @@ gst_dirac_enc_set_format (GstBaseVideoEncoder * base_video_encoder, { GstDiracEnc *dirac_enc = GST_DIRAC_ENC (base_video_encoder); GstCaps *caps; - GstStructure *structure; + gboolean ret; GST_DEBUG ("set_format"); - caps = - gst_pad_get_allowed_caps 
(GST_BASE_VIDEO_CODEC_SRC_PAD - (base_video_encoder)); - - if (caps == NULL) { - caps = - gst_caps_copy (gst_pad_get_pad_template_caps - (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder))); - } - - if (gst_caps_is_empty (caps)) { - gst_caps_unref (caps); - return FALSE; - } - - structure = gst_caps_get_structure (caps, 0); - - if (gst_structure_has_name (structure, "video/x-dirac")) { - dirac_enc->output_format = GST_DIRAC_ENC_OUTPUT_OGG; - } else if (gst_structure_has_name (structure, "video/x-qt-part")) { - dirac_enc->output_format = GST_DIRAC_ENC_OUTPUT_QUICKTIME; - } else if (gst_structure_has_name (structure, "video/x-avi-part")) { - dirac_enc->output_format = GST_DIRAC_ENC_OUTPUT_AVI; - } else if (gst_structure_has_name (structure, "video/x-mp4-part")) { - dirac_enc->output_format = GST_DIRAC_ENC_OUTPUT_MP4; - } else { - return FALSE; - } - - gst_caps_unref (caps); - gst_base_video_encoder_set_latency_fields (base_video_encoder, 2 * 2); switch (state->format) { @@ -421,7 +376,22 @@ gst_dirac_enc_set_format (GstBaseVideoEncoder * base_video_encoder, dirac_enc->enc_ctx.decode_flag = 0; dirac_enc->enc_ctx.instr_flag = 0; - return TRUE; + dirac_enc->granule_offset = ~0; + + dirac_enc->encoder = dirac_encoder_init (&dirac_enc->enc_ctx, FALSE); + + caps = gst_caps_new_simple ("video/x-dirac", + "width", G_TYPE_INT, state->width, + "height", G_TYPE_INT, state->height, + "framerate", GST_TYPE_FRACTION, state->fps_n, + state->fps_d, + "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, + state->par_d, NULL); + + ret = gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (dirac_enc), caps); + gst_caps_unref (caps); + + return ret; } static void @@ -821,12 +791,6 @@ error: static gboolean gst_dirac_enc_start (GstBaseVideoEncoder * base_video_encoder) { - GstDiracEnc *dirac_enc = GST_DIRAC_ENC (base_video_encoder); - - dirac_enc->granule_offset = ~0; - - dirac_enc->encoder = dirac_encoder_init (&dirac_enc->enc_ctx, FALSE); - return TRUE; } @@ -845,7 +809,7 @@ 
gst_dirac_enc_stop (GstBaseVideoEncoder * base_video_encoder) return TRUE; } -static gboolean +static GstFlowReturn gst_dirac_enc_finish (GstBaseVideoEncoder * base_video_encoder) { GstDiracEnc *dirac_enc = GST_DIRAC_ENC (base_video_encoder); @@ -854,15 +818,15 @@ gst_dirac_enc_finish (GstBaseVideoEncoder * base_video_encoder) gst_dirac_enc_process (dirac_enc, TRUE); - return TRUE; + return GST_FLOW_OK; } -static gboolean +static GstFlowReturn gst_dirac_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame) { GstDiracEnc *dirac_enc = GST_DIRAC_ENC (base_video_encoder); - gboolean ret; + GstFlowReturn ret; int r; const GstVideoState *state; uint8_t *data; @@ -965,7 +929,7 @@ gst_dirac_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, } if (r != (int) GST_BUFFER_SIZE (frame->sink_buffer)) { GST_ERROR ("failed to push picture"); - return FALSE; + return GST_FLOW_ERROR; } GST_DEBUG ("handle frame"); @@ -978,7 +942,7 @@ gst_dirac_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, ret = gst_dirac_enc_process (dirac_enc, FALSE); - return (ret == GST_FLOW_OK); + return ret; } #if 0 @@ -1137,8 +1101,7 @@ static GstFlowReturn gst_dirac_enc_process (GstDiracEnc * dirac_enc, gboolean end_sequence) { GstBuffer *outbuf; - GstFlowReturn ret; - int presentation_frame; + GstFlowReturn ret = GST_FLOW_OK; int parse_code; int state; GstVideoFrame *frame; @@ -1194,15 +1157,34 @@ gst_dirac_enc_process (GstDiracEnc * dirac_enc, gboolean end_sequence) dirac_enc->pull_frame_num++; parse_code = ((guint8 *) GST_BUFFER_DATA (outbuf))[4]; - /* FIXME */ - presentation_frame = 0; if (DIRAC_PARSE_CODE_IS_SEQ_HEADER (parse_code)) { frame->is_sync_point = TRUE; } if (!dirac_enc->codec_data) { + GstCaps *caps; + const GstVideoState *state = gst_base_video_encoder_get_state (GST_BASE_VIDEO_ENCODER (dirac_enc)); + gst_dirac_enc_create_codec_data (dirac_enc, outbuf); + + caps = gst_caps_new_simple ("video/x-dirac", + "width", G_TYPE_INT, state->width, + 
"height", G_TYPE_INT, state->height, + "framerate", GST_TYPE_FRACTION, state->fps_n, + state->fps_d, + "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, + state->par_d, "streamheader", GST_TYPE_BUFFER, dirac_enc->codec_data, + NULL); + if (!gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (dirac_enc), caps)) + ret = GST_FLOW_NOT_NEGOTIATED; + gst_caps_unref (caps); + + if (ret != GST_FLOW_OK) { + GST_ERROR ("Failed to set srcpad caps"); + gst_buffer_unref (outbuf); + return ret; + } } frame->src_buffer = outbuf; @@ -1232,7 +1214,6 @@ gst_dirac_enc_shape_output_ogg (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame) { GstDiracEnc *dirac_enc; - int dpn; int delay; int dist; int pt; @@ -1243,8 +1224,6 @@ gst_dirac_enc_shape_output_ogg (GstBaseVideoEncoder * base_video_encoder, dirac_enc = GST_DIRAC_ENC (base_video_encoder); - dpn = frame->decode_frame_number; - pt = frame->presentation_frame_number * 2 + dirac_enc->granule_offset; dt = frame->decode_frame_number * 2 + dirac_enc->granule_offset; delay = pt - dt; @@ -1266,74 +1245,8 @@ gst_dirac_enc_shape_output_ogg (GstBaseVideoEncoder * base_video_encoder, GST_BUFFER_OFFSET_END (buf) = dirac_enc->last_granulepos; } - gst_buffer_set_caps (buf, GST_BASE_VIDEO_CODEC(base_video_encoder)->caps); - - return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), buf); -} - -static GstFlowReturn -gst_dirac_enc_shape_output_quicktime (GstBaseVideoEncoder * base_video_encoder, - GstVideoFrame * frame) -{ - GstBuffer *buf = frame->src_buffer; - const GstVideoState *state; - - state = gst_base_video_encoder_get_state (base_video_encoder); - - GST_BUFFER_TIMESTAMP (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC(base_video_encoder)->segment, frame->presentation_frame_number); - GST_BUFFER_DURATION (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC(base_video_encoder)->segment, - frame->presentation_frame_number + 1) - GST_BUFFER_TIMESTAMP (buf); - 
GST_BUFFER_OFFSET_END (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC(base_video_encoder)->segment, - frame->system_frame_number); - GST_BUFFER_OFFSET (buf) = GST_CLOCK_TIME_NONE; - - if (frame->is_sync_point && - frame->presentation_frame_number == frame->system_frame_number) { - GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT); - } else { - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT); - } - - gst_buffer_set_caps (buf, GST_BASE_VIDEO_CODEC(base_video_encoder)->caps); - - return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), buf); -} - -static GstFlowReturn -gst_dirac_enc_shape_output_mp4 (GstBaseVideoEncoder * base_video_encoder, - GstVideoFrame * frame) -{ - GstBuffer *buf = frame->src_buffer; - const GstVideoState *state; - - state = gst_base_video_encoder_get_state (base_video_encoder); - - GST_BUFFER_TIMESTAMP (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC(base_video_encoder)->segment, - frame->presentation_frame_number); - GST_BUFFER_DURATION (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC(base_video_encoder)->segment, - frame->presentation_frame_number + 1) - GST_BUFFER_TIMESTAMP (buf); - GST_BUFFER_OFFSET_END (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC(base_video_encoder)->segment, - frame->decode_frame_number); - GST_BUFFER_OFFSET (buf) = GST_CLOCK_TIME_NONE; - - GST_BUFFER_OFFSET_END (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC(base_video_encoder)->segment, - frame->system_frame_number); - - if (frame->is_sync_point && - frame->presentation_frame_number == frame->system_frame_number) { - GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT); - } else { - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT); - } - - gst_buffer_set_caps (buf, GST_BASE_VIDEO_CODEC(base_video_encoder)->caps); + gst_buffer_set_caps (buf, + GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder))); return 
gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), buf); } @@ -1342,21 +1255,7 @@ static GstFlowReturn gst_dirac_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame) { - GstDiracEnc *dirac_enc; - - dirac_enc = GST_DIRAC_ENC (base_video_encoder); - - switch (dirac_enc->output_format) { - case GST_DIRAC_ENC_OUTPUT_OGG: - return gst_dirac_enc_shape_output_ogg (base_video_encoder, frame); - case GST_DIRAC_ENC_OUTPUT_QUICKTIME: - return gst_dirac_enc_shape_output_quicktime (base_video_encoder, frame); - case GST_DIRAC_ENC_OUTPUT_MP4: - return gst_dirac_enc_shape_output_mp4 (base_video_encoder, frame); - default: - g_assert_not_reached (); - break; - } + gst_dirac_enc_shape_output_ogg (base_video_encoder, frame); return GST_FLOW_ERROR; } @@ -1393,65 +1292,4 @@ gst_dirac_enc_create_codec_data (GstDiracEnc * dirac_enc, dirac_enc->codec_data = buf; } -static GstCaps * -gst_dirac_enc_get_caps (GstBaseVideoEncoder * base_video_encoder) -{ - GstCaps *caps; - const GstVideoState *state; - GstDiracEnc *dirac_enc; - - dirac_enc = GST_DIRAC_ENC (base_video_encoder); - state = gst_base_video_encoder_get_state (base_video_encoder); - - if (dirac_enc->output_format == GST_DIRAC_ENC_OUTPUT_OGG) { - caps = gst_caps_new_simple ("video/x-dirac", - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, - "streamheader", GST_TYPE_BUFFER, dirac_enc->codec_data, NULL); - } else if (dirac_enc->output_format == GST_DIRAC_ENC_OUTPUT_QUICKTIME) { - caps = gst_caps_new_simple ("video/x-qt-part", - "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'r', 'a', 'c'), - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - } else if 
(dirac_enc->output_format == GST_DIRAC_ENC_OUTPUT_AVI) { - caps = gst_caps_new_simple ("video/x-avi-part", - "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'r', 'a', 'c'), - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - } else if (dirac_enc->output_format == GST_DIRAC_ENC_OUTPUT_MPEG_TS) { - caps = gst_caps_new_simple ("video/x-mpegts-part", - "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'r', 'a', 'c'), - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - } else if (dirac_enc->output_format == GST_DIRAC_ENC_OUTPUT_MP4) { - caps = gst_caps_new_simple ("video/x-mp4-part", - "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'r', 'a', 'c'), - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - } else { - g_assert_not_reached (); - } - - return caps; -} diff --git a/ext/resindvd/rsndec.c b/ext/resindvd/rsndec.c index 7db1e46..2f42e28 100644 --- a/ext/resindvd/rsndec.c +++ b/ext/resindvd/rsndec.c @@ -103,6 +103,15 @@ rsn_dec_dispose (GObject * object) G_OBJECT_CLASS (rsn_dec_parent_class)->dispose (object); } +static void +child_pad_added (GstElement * element, GstPad * pad, RsnDec * self) +{ + GST_DEBUG_OBJECT (self, "New pad: %" GST_PTR_FORMAT, pad); + gst_ghost_pad_set_target (self->srcpad, pad); + + gst_element_sync_state_with_parent (element); +} + static gboolean rsn_dec_set_child (RsnDec * self, GstElement * new_child) { @@ -127,16 +136,16 @@ rsn_dec_set_child (RsnDec * self, GstElement * new_child) gst_ghost_pad_set_target (self->sinkpad, child_pad); 
gst_object_unref (child_pad); - child_pad = gst_element_get_static_pad (new_child, "src"); - if (child_pad == NULL) { - return FALSE; - } - gst_ghost_pad_set_target (self->srcpad, child_pad); - gst_object_unref (child_pad); + /* Listen for new pads from the decoder */ + g_signal_connect (G_OBJECT (new_child), "pad-added", + G_CALLBACK (child_pad_added), self); GST_DEBUG_OBJECT (self, "Add child %" GST_PTR_FORMAT, new_child); self->current_decoder = new_child; + /* not sure if we need this here, or if the one in child_pad_added + * is sufficient.. + */ gst_element_sync_state_with_parent (new_child); return TRUE; @@ -155,114 +164,6 @@ typedef struct GstCaps *decoder_caps; } RsnDecFactoryFilterCtx; -static gboolean -rsndec_factory_filter (GstPluginFeature * feature, RsnDecFactoryFilterCtx * ctx) -{ - GstElementFactory *factory; - guint rank; - const gchar *klass; - const GList *templates; - GList *walk; - gboolean can_sink = FALSE; - - /* we only care about element factories */ - if (!GST_IS_ELEMENT_FACTORY (feature)) - return FALSE; - - factory = GST_ELEMENT_FACTORY (feature); - - klass = gst_element_factory_get_klass (factory); - /* only decoders can play */ - if (strstr (klass, "Decoder") == NULL) - return FALSE; - - /* only select elements with autoplugging rank */ - rank = gst_plugin_feature_get_rank (feature); - if (rank < GST_RANK_MARGINAL) - return FALSE; - - /* See if the element has a sink pad that can possibly sink this caps */ - - /* get the templates from the element factory */ - templates = gst_element_factory_get_static_pad_templates (factory); - for (walk = (GList *) templates; walk && !can_sink; walk = g_list_next (walk)) { - GstStaticPadTemplate *templ = walk->data; - - /* we only care about the sink templates */ - if (templ->direction == GST_PAD_SINK) { - GstCaps *intersect; - GstCaps *tmpl_caps; - - /* try to intersect the caps with the caps of the template */ - tmpl_caps = gst_static_caps_get (&templ->static_caps); - - intersect = 
gst_caps_intersect (ctx->desired_caps, tmpl_caps); - gst_caps_unref (tmpl_caps); - - /* check if the intersection is empty */ - if (!gst_caps_is_empty (intersect)) { - GstCaps *new_dec_caps; - /* non empty intersection, we can use this element */ - can_sink = TRUE; - new_dec_caps = gst_caps_union (ctx->decoder_caps, intersect); - gst_caps_unref (ctx->decoder_caps); - ctx->decoder_caps = new_dec_caps; - } - gst_caps_unref (intersect); - } - } - - if (can_sink) { - GST_DEBUG ("Found decoder element %s (%s)", - gst_element_factory_get_longname (factory), - gst_plugin_feature_get_name (feature)); - } - - return can_sink; -} - -static gint -sort_by_ranks (GstPluginFeature * f1, GstPluginFeature * f2) -{ - gint diff; - const gchar *rname1, *rname2; - - diff = gst_plugin_feature_get_rank (f2) - gst_plugin_feature_get_rank (f1); - if (diff != 0) - return diff; - - rname1 = gst_plugin_feature_get_name (f1); - rname2 = gst_plugin_feature_get_name (f2); - - diff = strcmp (rname2, rname1); - - return diff; -} - -static gpointer -_get_decoder_factories (gpointer arg) -{ - GstElementClass *klass = arg; - GList *factories; - GstPadTemplate *templ = gst_element_class_get_pad_template (klass, - "sink"); - RsnDecFactoryFilterCtx ctx = { NULL, }; - - ctx.desired_caps = gst_pad_template_get_caps (templ); - /* Set decoder caps to empty. 
Will be filled by the factory_filter */ - ctx.decoder_caps = gst_caps_new_empty (); - - factories = gst_default_registry_feature_filter ( - (GstPluginFeatureFilter) rsndec_factory_filter, FALSE, &ctx); - - factories = g_list_sort (factories, (GCompareFunc) sort_by_ranks); - - GST_DEBUG ("Available decoder caps %" GST_PTR_FORMAT, ctx.decoder_caps); - gst_caps_unref (ctx.decoder_caps); - - return factories; -} - static GstStateChangeReturn rsn_dec_change_state (GstElement * element, GstStateChange transition) { @@ -273,12 +174,8 @@ rsn_dec_change_state (GstElement * element, GstStateChange transition) switch (transition) { case GST_STATE_CHANGE_NULL_TO_READY:{ GstElement *new_child; - const GList *decoder_factories; - new_child = gst_element_factory_make ("autoconvert", NULL); - decoder_factories = klass->get_decoder_factories (klass); - g_object_set (G_OBJECT (new_child), "initial-identity", TRUE, - "factories", decoder_factories, NULL); + new_child = gst_element_factory_make ("decodebin2", NULL); if (new_child == NULL || !rsn_dec_set_child (self, new_child)) ret = GST_STATE_CHANGE_FAILURE; break; @@ -365,16 +262,6 @@ static GstStaticPadTemplate audio_src_template = GST_STATIC_PAD_TEMPLATE ("src", G_DEFINE_TYPE (RsnAudioDec, rsn_audiodec, RSN_TYPE_DEC); -static const GList * -rsn_audiodec_get_decoder_factories (RsnDecClass * klass) -{ - static GOnce gonce = G_ONCE_INIT; - - g_once (&gonce, _get_decoder_factories, klass); - - return (const GList *) gonce.retval; -} - static void rsn_audiodec_class_init (RsnAudioDecClass * klass) { @@ -389,8 +276,6 @@ rsn_audiodec_class_init (RsnAudioDecClass * klass) gst_element_class_set_details_simple (element_class, "RsnAudioDec", "Audio/Decoder", "Resin DVD audio stream decoder", "Jan Schmidt <thaytan@noraisin.net>"); - - dec_class->get_decoder_factories = rsn_audiodec_get_decoder_factories; } static void @@ -415,16 +300,6 @@ static GstStaticPadTemplate video_src_template = GST_STATIC_PAD_TEMPLATE ("src", G_DEFINE_TYPE 
(RsnVideoDec, rsn_videodec, RSN_TYPE_DEC); -static const GList * -rsn_videodec_get_decoder_factories (RsnDecClass * klass) -{ - static GOnce gonce = G_ONCE_INIT; - - g_once (&gonce, _get_decoder_factories, klass); - - return (const GList *) gonce.retval; -} - static void rsn_videodec_class_init (RsnAudioDecClass * klass) { @@ -439,8 +314,6 @@ rsn_videodec_class_init (RsnAudioDecClass * klass) gst_element_class_set_details_simple (element_class, "RsnVideoDec", "Video/Decoder", "Resin DVD video stream decoder", "Jan Schmidt <thaytan@noraisin.net>"); - - dec_class->get_decoder_factories = rsn_videodec_get_decoder_factories; } static void diff --git a/ext/resindvd/rsndec.h b/ext/resindvd/rsndec.h index 93c79a6..3778787 100644 --- a/ext/resindvd/rsndec.h +++ b/ext/resindvd/rsndec.h @@ -70,8 +70,6 @@ struct _RsnDec { struct _RsnDecClass { GstBinClass parent_class; - - const GList * (*get_decoder_factories) (RsnDecClass *klass); }; G_END_DECLS diff --git a/ext/schroedinger/gstschrodec.c b/ext/schroedinger/gstschrodec.c index 7917226..126ef1f 100644 --- a/ext/schroedinger/gstschrodec.c +++ b/ext/schroedinger/gstschrodec.c @@ -78,10 +78,6 @@ enum }; static void gst_schro_dec_finalize (GObject * object); -static void gst_schro_dec_set_property (GObject * object, guint prop_id, - const GValue * value, GParamSpec * pspec); -static void gst_schro_dec_get_property (GObject * object, guint prop_id, - GValue * value, GParamSpec * pspec); static gboolean gst_schro_dec_sink_query (GstPad * pad, GstQuery * query); @@ -137,8 +133,6 @@ gst_schro_dec_class_init (GstSchroDecClass * klass) gobject_class = G_OBJECT_CLASS (klass); base_video_decoder_class = GST_BASE_VIDEO_DECODER_CLASS (klass); - gobject_class->set_property = gst_schro_dec_set_property; - gobject_class->get_property = gst_schro_dec_get_property; gobject_class->finalize = gst_schro_dec_finalize; base_video_decoder_class->start = GST_DEBUG_FUNCPTR (gst_schro_dec_start); @@ -172,21 +166,16 @@ static gint64 granulepos_to_frame 
(gint64 granulepos) { guint64 pt; - int dist_h; - int dist_l; - int dist; - int delay; - guint64 dt; if (granulepos == -1) return -1; pt = ((granulepos >> 22) + (granulepos & OGG_DIRAC_GRANULE_LOW_MASK)) >> 9; - dist_h = (granulepos >> 22) & 0xff; - dist_l = granulepos & 0xff; - dist = (dist_h << 8) | dist_l; - delay = (granulepos >> 9) & 0x1fff; - dt = pt - delay; + /* dist_h = (granulepos >> 22) & 0xff; + * dist_l = granulepos & 0xff; + * dist = (dist_h << 8) | dist_l; + * delay = (granulepos >> 9) & 0x1fff; + * dt = pt - delay; */ return pt >> 1; } @@ -309,38 +298,6 @@ gst_schro_dec_finalize (GObject * object) } static void -gst_schro_dec_set_property (GObject * object, guint prop_id, - const GValue * value, GParamSpec * pspec) -{ - GstSchroDec *src; - - g_return_if_fail (GST_IS_SCHRO_DEC (object)); - src = GST_SCHRO_DEC (object); - - GST_DEBUG ("gst_schro_dec_set_property"); - switch (prop_id) { - default: - break; - } -} - -static void -gst_schro_dec_get_property (GObject * object, guint prop_id, GValue * value, - GParamSpec * pspec) -{ - GstSchroDec *src; - - g_return_if_fail (GST_IS_SCHRO_DEC (object)); - src = GST_SCHRO_DEC (object); - - switch (prop_id) { - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); - break; - } -} - -static void parse_sequence_header (GstSchroDec * schro_dec, guint8 * data, int size) { SchroVideoFormat video_format; @@ -382,9 +339,7 @@ parse_sequence_header (GstSchroDec * schro_dec, guint8 * data, int size) state->par_d = video_format.aspect_ratio_denominator; GST_DEBUG ("Pixel aspect ratio is %d/%d", state->par_n, state->par_d); - /* FIXME state points to what is actually in the decoder */ - //gst_base_video_decoder_set_state (GST_BASE_VIDEO_DECODER (schro_dec), - // state); + gst_base_video_decoder_set_src_caps (GST_BASE_VIDEO_DECODER (schro_dec)); } else { GST_WARNING ("Failed to get frame rate from sequence header"); } @@ -644,7 +599,6 @@ gst_schro_dec_handle_frame (GstBaseVideoDecoder * base_video_decoder, 
GstVideoFrame * frame) { GstSchroDec *schro_dec; - int schro_ret; SchroBuffer *input_buffer; schro_dec = GST_SCHRO_DEC (base_video_decoder); @@ -656,7 +610,7 @@ gst_schro_dec_handle_frame (GstBaseVideoDecoder * base_video_decoder, input_buffer->tag = schro_tag_new (frame, NULL); - schro_ret = schro_decoder_autoparse_push (schro_dec->decoder, input_buffer); + schro_decoder_autoparse_push (schro_dec->decoder, input_buffer); return gst_schro_dec_process (schro_dec, FALSE); } diff --git a/ext/schroedinger/gstschroenc.c b/ext/schroedinger/gstschroenc.c index faf5a7f..beda4ea 100644 --- a/ext/schroedinger/gstschroenc.c +++ b/ext/schroedinger/gstschroenc.c @@ -24,6 +24,7 @@ #include <gst/gst.h> #include <gst/video/video.h> #include <gst/video/gstbasevideoencoder.h> +#include <gst/video/gstbasevideoutils.h> #include <string.h> #include <schroedinger/schro.h> @@ -49,15 +50,6 @@ GST_DEBUG_CATEGORY_EXTERN (schro_debug); typedef struct _GstSchroEnc GstSchroEnc; typedef struct _GstSchroEncClass GstSchroEncClass; -typedef enum -{ - GST_SCHRO_ENC_OUTPUT_OGG, - GST_SCHRO_ENC_OUTPUT_QUICKTIME, - GST_SCHRO_ENC_OUTPUT_AVI, - GST_SCHRO_ENC_OUTPUT_MPEG_TS, - GST_SCHRO_ENC_OUTPUT_MP4 -} GstSchroEncOutputType; - struct _GstSchroEnc { GstBaseVideoEncoder base_encoder; @@ -65,13 +57,9 @@ struct _GstSchroEnc GstPad *sinkpad; GstPad *srcpad; - /* video properties */ - GstSchroEncOutputType output_format; - /* state */ SchroEncoder *encoder; SchroVideoFormat *video_format; - GstBuffer *seq_header_buffer; guint64 last_granulepos; guint64 granule_offset; @@ -107,13 +95,12 @@ static gboolean gst_schro_enc_set_format (GstBaseVideoEncoder * base_video_encoder, GstVideoState * state); static gboolean gst_schro_enc_start (GstBaseVideoEncoder * base_video_encoder); static gboolean gst_schro_enc_stop (GstBaseVideoEncoder * base_video_encoder); -static gboolean gst_schro_enc_finish (GstBaseVideoEncoder * base_video_encoder); +static GstFlowReturn gst_schro_enc_finish (GstBaseVideoEncoder * + 
base_video_encoder); static GstFlowReturn gst_schro_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame); static GstFlowReturn gst_schro_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame); -static GstCaps *gst_schro_enc_get_caps (GstBaseVideoEncoder * - base_video_encoder); static GstStaticPadTemplate gst_schro_enc_sink_template = GST_STATIC_PAD_TEMPLATE ("sink", @@ -229,7 +216,6 @@ gst_schro_enc_class_init (GstSchroEncClass * klass) GST_DEBUG_FUNCPTR (gst_schro_enc_handle_frame); basevideocoder_class->shape_output = GST_DEBUG_FUNCPTR (gst_schro_enc_shape_output); - basevideocoder_class->get_caps = GST_DEBUG_FUNCPTR (gst_schro_enc_get_caps); } static void @@ -251,41 +237,12 @@ static gboolean gst_schro_enc_set_format (GstBaseVideoEncoder * base_video_encoder, GstVideoState * state) { - GstCaps *caps; - GstStructure *structure; GstSchroEnc *schro_enc = GST_SCHRO_ENC (base_video_encoder); + GstCaps *caps; + GstBuffer *seq_header_buffer; + gboolean ret; GST_DEBUG ("set_output_caps"); - caps = - gst_pad_get_allowed_caps (GST_BASE_VIDEO_CODEC_SRC_PAD - (base_video_encoder)); - if (caps == NULL) { - caps = - gst_caps_copy (gst_pad_get_pad_template_caps - (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder))); - } - - if (gst_caps_is_empty (caps)) { - gst_caps_unref (caps); - return FALSE; - } - - structure = gst_caps_get_structure (caps, 0); - - if (gst_structure_has_name (structure, "video/x-dirac")) { - schro_enc->output_format = GST_SCHRO_ENC_OUTPUT_OGG; - } else if (gst_structure_has_name (structure, "video/x-qt-part")) { - schro_enc->output_format = GST_SCHRO_ENC_OUTPUT_QUICKTIME; - } else if (gst_structure_has_name (structure, "video/x-avi-part")) { - schro_enc->output_format = GST_SCHRO_ENC_OUTPUT_AVI; - } else if (gst_structure_has_name (structure, "video/x-mp4-part")) { - schro_enc->output_format = GST_SCHRO_ENC_OUTPUT_MP4; - } else { - gst_caps_unref (caps); - return FALSE; - } - - gst_caps_unref 
(caps); gst_base_video_encoder_set_latency_fields (base_video_encoder, 2 * (int) schro_encoder_setting_get_double (schro_enc->encoder, @@ -334,11 +291,58 @@ gst_schro_enc_set_format (GstBaseVideoEncoder * base_video_encoder, schro_encoder_set_video_format (schro_enc->encoder, schro_enc->video_format); schro_encoder_start (schro_enc->encoder); - schro_enc->seq_header_buffer = + seq_header_buffer = gst_schro_wrap_schro_buffer (schro_encoder_encode_sequence_header (schro_enc->encoder)); - return TRUE; + schro_enc->granule_offset = ~0; + + caps = gst_caps_new_simple ("video/x-dirac", + "width", G_TYPE_INT, state->width, + "height", G_TYPE_INT, state->height, + "framerate", GST_TYPE_FRACTION, state->fps_n, + state->fps_d, + "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, + state->par_d, NULL); + + GST_BUFFER_FLAG_SET (seq_header_buffer, GST_BUFFER_FLAG_IN_CAPS); + { + GValue array = { 0 }; + GValue value = { 0 }; + GstBuffer *buf; + int size; + + g_value_init (&array, GST_TYPE_ARRAY); + g_value_init (&value, GST_TYPE_BUFFER); + size = GST_BUFFER_SIZE (seq_header_buffer); + buf = gst_buffer_new_and_alloc (size + SCHRO_PARSE_HEADER_SIZE); + + /* ogg(mux) expects the header buffers to have 0 timestamps - + set OFFSET and OFFSET_END accordingly */ + GST_BUFFER_OFFSET (buf) = 0; + GST_BUFFER_OFFSET_END (buf) = 0; + GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_IN_CAPS); + + memcpy (GST_BUFFER_DATA (buf), GST_BUFFER_DATA (seq_header_buffer), size); + GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf) + size + 0, 0x42424344); + GST_WRITE_UINT8 (GST_BUFFER_DATA (buf) + size + 4, + SCHRO_PARSE_CODE_END_OF_SEQUENCE); + GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf) + size + 5, 0); + GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf) + size + 9, size); + gst_value_set_buffer (&value, buf); + gst_buffer_unref (buf); + gst_value_array_append_value (&array, &value); + gst_structure_set_value (gst_caps_get_structure (caps, 0), + "streamheader", &array); + g_value_unset (&value); + g_value_unset 
(&array); + } + gst_buffer_unref (seq_header_buffer); + + ret = gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (schro_enc), caps); + gst_caps_unref (caps); + + return ret; } static void @@ -413,10 +417,6 @@ gst_schro_enc_get_property (GObject * object, guint prop_id, GValue * value, static gboolean gst_schro_enc_start (GstBaseVideoEncoder * base_video_encoder) { - GstSchroEnc *schro_enc = GST_SCHRO_ENC (base_video_encoder); - - schro_enc->granule_offset = ~0; - return TRUE; } @@ -429,10 +429,6 @@ gst_schro_enc_stop (GstBaseVideoEncoder * base_video_encoder) schro_encoder_free (schro_enc->encoder); schro_enc->encoder = NULL; } - if (schro_enc->seq_header_buffer) { - gst_buffer_unref (schro_enc->seq_header_buffer); - schro_enc->seq_header_buffer = NULL; - } if (schro_enc->video_format) { g_free (schro_enc->video_format); schro_enc->video_format = NULL; @@ -441,7 +437,7 @@ gst_schro_enc_stop (GstBaseVideoEncoder * base_video_encoder) return TRUE; } -static gboolean +static GstFlowReturn gst_schro_enc_finish (GstBaseVideoEncoder * base_video_encoder) { GstSchroEnc *schro_enc = GST_SCHRO_ENC (base_video_encoder); @@ -451,7 +447,7 @@ gst_schro_enc_finish (GstBaseVideoEncoder * base_video_encoder) schro_encoder_end_of_stream (schro_enc->encoder); gst_schro_enc_process (schro_enc); - return TRUE; + return GST_FLOW_OK; } static GstFlowReturn @@ -483,138 +479,11 @@ gst_schro_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, return ret; } -#if 0 -static void -gst_caps_add_streamheader (GstCaps * caps, GList * list) -{ - GValue array = { 0 }; - GValue value = { 0 }; - GstBuffer *buf; - GList *g; - - g_value_init (&array, GST_TYPE_ARRAY); - - for (g = g_list_first (list); g; g = g_list_next (list)) { - g_value_init (&value, GST_TYPE_BUFFER); - buf = gst_buffer_copy (GST_BUFFER (g->data)); - gst_value_set_buffer (&value, buf); - gst_buffer_unref (buf); - gst_value_array_append_value (&array, &value); - g_value_unset (&value); - } - gst_structure_set_value 
(gst_caps_get_structure (caps, 0), - "streamheader", &array); - g_value_unset (&array); -} -#endif - -static GstCaps * -gst_schro_enc_get_caps (GstBaseVideoEncoder * base_video_encoder) -{ - GstCaps *caps; - const GstVideoState *state; - GstSchroEnc *schro_enc; - - schro_enc = GST_SCHRO_ENC (base_video_encoder); - - state = gst_base_video_encoder_get_state (base_video_encoder); - - if (schro_enc->output_format == GST_SCHRO_ENC_OUTPUT_OGG) { - caps = gst_caps_new_simple ("video/x-dirac", - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - - GST_BUFFER_FLAG_SET (schro_enc->seq_header_buffer, GST_BUFFER_FLAG_IN_CAPS); - - { - GValue array = { 0 }; - GValue value = { 0 }; - GstBuffer *buf; - int size; - - g_value_init (&array, GST_TYPE_ARRAY); - g_value_init (&value, GST_TYPE_BUFFER); - size = GST_BUFFER_SIZE (schro_enc->seq_header_buffer); - buf = gst_buffer_new_and_alloc (size + SCHRO_PARSE_HEADER_SIZE); - - /* ogg(mux) expects the header buffers to have 0 timestamps - - set OFFSET and OFFSET_END accordingly */ - GST_BUFFER_OFFSET (buf) = 0; - GST_BUFFER_OFFSET_END (buf) = 0; - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_IN_CAPS); - - memcpy (GST_BUFFER_DATA (buf), - GST_BUFFER_DATA (schro_enc->seq_header_buffer), size); - GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf) + size + 0, 0x42424344); - GST_WRITE_UINT8 (GST_BUFFER_DATA (buf) + size + 4, - SCHRO_PARSE_CODE_END_OF_SEQUENCE); - GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf) + size + 5, 0); - GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf) + size + 9, size); - gst_value_set_buffer (&value, buf); - gst_buffer_unref (buf); - gst_value_array_append_value (&array, &value); - gst_structure_set_value (gst_caps_get_structure (caps, 0), - "streamheader", &array); - g_value_unset (&value); - g_value_unset (&array); - } - } else if (schro_enc->output_format == 
GST_SCHRO_ENC_OUTPUT_QUICKTIME) { - caps = gst_caps_new_simple ("video/x-qt-part", - "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'r', 'a', 'c'), - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - } else if (schro_enc->output_format == GST_SCHRO_ENC_OUTPUT_AVI) { - caps = gst_caps_new_simple ("video/x-avi-part", - "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'r', 'a', 'c'), - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - } else if (schro_enc->output_format == GST_SCHRO_ENC_OUTPUT_MPEG_TS) { - caps = gst_caps_new_simple ("video/x-mpegts-part", - "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'r', 'a', 'c'), - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - } else if (schro_enc->output_format == GST_SCHRO_ENC_OUTPUT_MP4) { - caps = gst_caps_new_simple ("video/x-mp4-part", - "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'r', 'a', 'c'), - "width", G_TYPE_INT, state->width, - "height", G_TYPE_INT, state->height, - "framerate", GST_TYPE_FRACTION, state->fps_n, - state->fps_d, - "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, - state->par_d, NULL); - } else { - g_assert_not_reached (); - caps = NULL; - } - - return caps; -} - - - - static GstFlowReturn -gst_schro_enc_shape_output_ogg (GstBaseVideoEncoder * base_video_encoder, +gst_schro_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame) { GstSchroEnc *schro_enc; - int dpn; int delay; int dist; int pt; @@ -625,8 +494,6 @@ gst_schro_enc_shape_output_ogg 
(GstBaseVideoEncoder * base_video_encoder, schro_enc = GST_SCHRO_ENC (base_video_encoder); - dpn = frame->decode_frame_number; - pt = frame->presentation_frame_number * 2 + schro_enc->granule_offset; dt = frame->decode_frame_number * 2 + schro_enc->granule_offset; delay = pt - dt; @@ -648,103 +515,13 @@ gst_schro_enc_shape_output_ogg (GstBaseVideoEncoder * base_video_encoder, GST_BUFFER_OFFSET_END (buf) = schro_enc->last_granulepos; } - gst_buffer_set_caps (buf, GST_BASE_VIDEO_CODEC (base_video_encoder)->caps); - - return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), buf); -} - -static GstFlowReturn -gst_schro_enc_shape_output_quicktime (GstBaseVideoEncoder * base_video_encoder, - GstVideoFrame * frame) -{ - GstBuffer *buf = frame->src_buffer; - const GstVideoState *state; - - state = gst_base_video_encoder_get_state (base_video_encoder); - - GST_BUFFER_TIMESTAMP (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, - frame->presentation_frame_number); - GST_BUFFER_DURATION (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, - frame->presentation_frame_number + 1) - GST_BUFFER_TIMESTAMP (buf); - GST_BUFFER_OFFSET_END (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, - frame->system_frame_number); - GST_BUFFER_OFFSET (buf) = GST_CLOCK_TIME_NONE; - - if (frame->is_sync_point && - frame->presentation_frame_number == frame->system_frame_number) { - GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT); - } else { - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT); - } - - gst_buffer_set_caps (buf, GST_BASE_VIDEO_CODEC (base_video_encoder)->caps); + gst_buffer_set_caps (buf, + GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder))); return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), buf); } static GstFlowReturn -gst_schro_enc_shape_output_mp4 (GstBaseVideoEncoder * 
base_video_encoder, - GstVideoFrame * frame) -{ - GstBuffer *buf = frame->src_buffer; - const GstVideoState *state; - - state = gst_base_video_encoder_get_state (base_video_encoder); - - GST_BUFFER_TIMESTAMP (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, - frame->presentation_frame_number); - GST_BUFFER_DURATION (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, - frame->presentation_frame_number + 1) - GST_BUFFER_TIMESTAMP (buf); - GST_BUFFER_OFFSET_END (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, - frame->decode_frame_number); - GST_BUFFER_OFFSET (buf) = GST_CLOCK_TIME_NONE; - - GST_BUFFER_OFFSET_END (buf) = gst_video_state_get_timestamp (state, - &GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, - frame->system_frame_number); - - if (frame->is_sync_point && - frame->presentation_frame_number == frame->system_frame_number) { - GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT); - } else { - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT); - } - - gst_buffer_set_caps (buf, GST_BASE_VIDEO_CODEC (base_video_encoder)->caps); - - return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), buf); -} - -static GstFlowReturn -gst_schro_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, - GstVideoFrame * frame) -{ - GstSchroEnc *schro_enc; - - schro_enc = GST_SCHRO_ENC (base_video_encoder); - - switch (schro_enc->output_format) { - case GST_SCHRO_ENC_OUTPUT_OGG: - return gst_schro_enc_shape_output_ogg (base_video_encoder, frame); - case GST_SCHRO_ENC_OUTPUT_QUICKTIME: - return gst_schro_enc_shape_output_quicktime (base_video_encoder, frame); - case GST_SCHRO_ENC_OUTPUT_MP4: - return gst_schro_enc_shape_output_mp4 (base_video_encoder, frame); - default: - g_assert_not_reached (); - break; - } - - return GST_FLOW_ERROR; -} - -static GstFlowReturn gst_schro_enc_process (GstSchroEnc * 
schro_enc) { SchroBuffer *encoded_buffer; diff --git a/ext/vp8/gstvp8dec.c b/ext/vp8/gstvp8dec.c index 3d8567f..4376f4b 100644 --- a/ext/vp8/gstvp8dec.c +++ b/ext/vp8/gstvp8dec.c @@ -98,6 +98,8 @@ static void gst_vp8_dec_get_property (GObject * object, guint prop_id, static gboolean gst_vp8_dec_start (GstBaseVideoDecoder * decoder); static gboolean gst_vp8_dec_stop (GstBaseVideoDecoder * decoder); +static gboolean gst_vp8_dec_set_format (GstBaseVideoDecoder * decoder, + GstVideoState * state); static gboolean gst_vp8_dec_reset (GstBaseVideoDecoder * decoder); static GstFlowReturn gst_vp8_dec_parse_data (GstBaseVideoDecoder * decoder, gboolean at_eos); @@ -172,11 +174,15 @@ gst_vp8_dec_class_init (GstVP8DecClass * klass) 0, 16, DEFAULT_NOISE_LEVEL, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - base_video_decoder_class->start = gst_vp8_dec_start; - base_video_decoder_class->stop = gst_vp8_dec_stop; - base_video_decoder_class->reset = gst_vp8_dec_reset; - base_video_decoder_class->parse_data = gst_vp8_dec_parse_data; - base_video_decoder_class->handle_frame = gst_vp8_dec_handle_frame; + base_video_decoder_class->start = GST_DEBUG_FUNCPTR (gst_vp8_dec_start); + base_video_decoder_class->stop = GST_DEBUG_FUNCPTR (gst_vp8_dec_stop); + base_video_decoder_class->reset = GST_DEBUG_FUNCPTR (gst_vp8_dec_reset); + base_video_decoder_class->set_format = + GST_DEBUG_FUNCPTR (gst_vp8_dec_set_format); + base_video_decoder_class->parse_data = + GST_DEBUG_FUNCPTR (gst_vp8_dec_parse_data); + base_video_decoder_class->handle_frame = + GST_DEBUG_FUNCPTR (gst_vp8_dec_handle_frame); GST_DEBUG_CATEGORY_INIT (gst_vp8dec_debug, "vp8dec", 0, "VP8 Decoder"); } @@ -275,6 +281,17 @@ gst_vp8_dec_stop (GstBaseVideoDecoder * base_video_decoder) } static gboolean +gst_vp8_dec_set_format (GstBaseVideoDecoder * decoder, GstVideoState * state) +{ + GstVP8Dec *gst_vp8_dec = GST_VP8_DEC (decoder); + + GST_DEBUG_OBJECT (gst_vp8_dec, "set_format"); + gst_vp8_dec->decoder_inited = FALSE; + + return TRUE; 
+} + +static gboolean gst_vp8_dec_reset (GstBaseVideoDecoder * base_video_decoder) { GstVP8Dec *decoder; @@ -381,15 +398,19 @@ gst_vp8_dec_handle_frame (GstBaseVideoDecoder * decoder, GstVideoFrame * frame) if (status != VPX_CODEC_OK || !stream_info.is_kf) { GST_WARNING_OBJECT (decoder, "No keyframe, skipping"); - gst_base_video_decoder_skip_frame (decoder, frame); + gst_base_video_decoder_finish_frame (decoder, frame); return GST_FLOW_OK; } - /* should set size here */ state->width = stream_info.w; state->height = stream_info.h; state->format = GST_VIDEO_FORMAT_I420; + if (state->par_n == 0 || state->par_d == 0) { + state->par_n = 1; + state->par_d = 1; + } gst_vp8_dec_send_tags (dec); + gst_base_video_decoder_set_src_caps (decoder); caps = vpx_codec_get_caps (&vpx_codec_vp8_dx_algo); @@ -431,21 +452,6 @@ gst_vp8_dec_handle_frame (GstBaseVideoDecoder * decoder, GstVideoFrame * frame) if (!GST_BUFFER_FLAG_IS_SET (frame->sink_buffer, GST_BUFFER_FLAG_DELTA_UNIT)) gst_base_video_decoder_set_sync_point (decoder); -#if 0 - if (GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (decoder)) == NULL) { - GstCaps *caps; - - caps = gst_video_format_new_caps (decoder->state.format, - decoder->state.width, decoder->state.height, - decoder->state.fps_n, decoder->state.fps_d, - decoder->state.par_n, decoder->state.par_d); - - GST_DEBUG ("setting caps %" GST_PTR_FORMAT, caps); - - gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (decoder), caps); - } -#endif - deadline = gst_base_video_decoder_get_max_decode_time (decoder, frame); if (deadline < 0) { decoder_deadline = 1; @@ -469,15 +475,15 @@ gst_vp8_dec_handle_frame (GstBaseVideoDecoder * decoder, GstVideoFrame * frame) if (deadline < 0) { GST_LOG_OBJECT (dec, "Skipping late frame (%f s past deadline)", (double) -deadline / GST_SECOND); - gst_base_video_decoder_skip_frame (decoder, frame); + gst_base_video_decoder_finish_frame (decoder, frame); } else { ret = gst_base_video_decoder_alloc_src_frame (decoder, frame); if (ret == GST_FLOW_OK) 
{ gst_vp8_dec_image_to_buffer (dec, img, frame->src_buffer); - gst_base_video_decoder_finish_frame (decoder, frame); + ret = gst_base_video_decoder_finish_frame (decoder, frame); } else { - gst_base_video_decoder_skip_frame (decoder, frame); + gst_base_video_decoder_finish_frame (decoder, frame); } } @@ -489,7 +495,7 @@ gst_vp8_dec_handle_frame (GstBaseVideoDecoder * decoder, GstVideoFrame * frame) } } else { /* Invisible frame */ - gst_base_video_decoder_skip_frame (decoder, frame); + gst_base_video_decoder_finish_frame (decoder, frame); } return ret; diff --git a/ext/vp8/gstvp8enc.c b/ext/vp8/gstvp8enc.c index 17b316a..e832975 100644 --- a/ext/vp8/gstvp8enc.c +++ b/ext/vp8/gstvp8enc.c @@ -65,8 +65,30 @@ typedef struct GList *invisible; } GstVP8EncCoderHook; +static void +_gst_mini_object_unref0 (GstMiniObject * obj) +{ + if (obj) + gst_mini_object_unref (obj); +} + +static void +gst_vp8_enc_coder_hook_free (GstVP8EncCoderHook * hook) +{ + if (hook->image) + g_slice_free (vpx_image_t, hook->image); + + g_list_foreach (hook->invisible, (GFunc) _gst_mini_object_unref0, NULL); + g_list_free (hook->invisible); + g_slice_free (GstVP8EncCoderHook, hook); +} + #define DEFAULT_BITRATE 0 #define DEFAULT_MODE VPX_VBR +#define DEFAULT_MINSECTION_PCT 5 +#define DEFAULT_MAXSECTION_PCT 800 +#define DEFAULT_MIN_QUANTIZER 0 +#define DEFAULT_MAX_QUANTIZER 63 #define DEFAULT_QUALITY 5 #define DEFAULT_ERROR_RESILIENT FALSE #define DEFAULT_MAX_LATENCY 10 @@ -74,14 +96,33 @@ typedef struct #define DEFAULT_SPEED 0 #define DEFAULT_THREADS 1 #define DEFAULT_MULTIPASS_MODE VPX_RC_ONE_PASS -#define DEFAULT_MULTIPASS_CACHE_FILE NULL +#define DEFAULT_MULTIPASS_CACHE_FILE "multipass.cache" #define DEFAULT_AUTO_ALT_REF_FRAMES FALSE +#define DEFAULT_LAG_IN_FRAMES 0 +#define DEFAULT_SHARPNESS 0 +#define DEFAULT_NOISE_SENSITIVITY 0 +#ifdef HAVE_VP8ENC_TUNING +#define DEFAULT_TUNE VP8_TUNE_PSNR +#else +typedef enum +{ VP8_TUNE_NONE } vp8e_tuning; +#define DEFAULT_TUNE VP8_TUNE_NONE +#endif 
+#define DEFAULT_STATIC_THRESHOLD 0 +#define DEFAULT_DROP_FRAME 0 +#define DEFAULT_RESIZE_ALLOWED TRUE +#define DEFAULT_TOKEN_PARTS 0 + enum { PROP_0, PROP_BITRATE, PROP_MODE, + PROP_MINSECTION_PCT, + PROP_MAXSECTION_PCT, + PROP_MIN_QUANTIZER, + PROP_MAX_QUANTIZER, PROP_QUALITY, PROP_ERROR_RESILIENT, PROP_MAX_LATENCY, @@ -90,7 +131,15 @@ enum PROP_THREADS, PROP_MULTIPASS_MODE, PROP_MULTIPASS_CACHE_FILE, - PROP_AUTO_ALT_REF_FRAMES + PROP_AUTO_ALT_REF_FRAMES, + PROP_LAG_IN_FRAMES, + PROP_SHARPNESS, + PROP_NOISE_SENSITIVITY, + PROP_TUNE, + PROP_STATIC_THRESHOLD, + PROP_DROP_FRAME, + PROP_RESIZE_ALLOWED, + PROP_TOKEN_PARTS }; #define GST_VP8_ENC_MODE_TYPE (gst_vp8_enc_mode_get_type()) @@ -138,6 +187,32 @@ gst_vp8_enc_multipass_mode_get_type (void) return id; } +#define GST_VP8_ENC_TUNE_TYPE (gst_vp8_enc_tune_get_type()) +static GType +gst_vp8_enc_tune_get_type (void) +{ + static const GEnumValue values[] = { +#ifdef HAVE_VP8ENC_TUNING + {VP8_TUNE_PSNR, "Tune for PSNR", "psnr"}, + {VP8_TUNE_SSIM, "Tune for SSIM", "ssim"}, +#else + {VP8_TUNE_NONE, "none", "none"}, +#endif + {0, NULL, NULL} + }; + static volatile GType id = 0; + + if (g_once_init_enter ((gsize *) & id)) { + GType _id; + + _id = g_enum_register_static ("GstVP8EncTune", values); + + g_once_init_leave ((gsize *) & id, _id); + } + + return id; +} + static void gst_vp8_enc_finalize (GObject * object); static void gst_vp8_enc_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); @@ -149,13 +224,12 @@ static gboolean gst_vp8_enc_stop (GstBaseVideoEncoder * encoder); static gboolean gst_vp8_enc_set_format (GstBaseVideoEncoder * base_video_encoder, GstVideoState * state); static gboolean gst_vp8_enc_finish (GstBaseVideoEncoder * base_video_encoder); -static gboolean gst_vp8_enc_handle_frame (GstBaseVideoEncoder * +static GstFlowReturn gst_vp8_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame); static GstFlowReturn gst_vp8_enc_shape_output 
(GstBaseVideoEncoder * encoder, GstVideoFrame * frame); -static GstCaps *gst_vp8_enc_get_caps (GstBaseVideoEncoder * base_video_encoder); - -static gboolean gst_vp8_enc_sink_event (GstPad * pad, GstEvent * event); +static gboolean gst_vp8_enc_sink_event (GstBaseVideoEncoder * + base_video_encoder, GstEvent * event); static GstStaticPadTemplate gst_vp8_enc_sink_template = GST_STATIC_PAD_TEMPLATE ("sink", @@ -225,7 +299,7 @@ gst_vp8_enc_class_init (GstVP8EncClass * klass) base_video_encoder_class->set_format = gst_vp8_enc_set_format; base_video_encoder_class->finish = gst_vp8_enc_finish; base_video_encoder_class->shape_output = gst_vp8_enc_shape_output; - base_video_encoder_class->get_caps = gst_vp8_enc_get_caps; + base_video_encoder_class->event = gst_vp8_enc_sink_event; g_object_class_install_property (gobject_class, PROP_BITRATE, g_param_spec_int ("bitrate", "Bit rate", @@ -239,9 +313,35 @@ gst_vp8_enc_class_init (GstVP8EncClass * klass) GST_VP8_ENC_MODE_TYPE, DEFAULT_MODE, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + g_object_class_install_property (gobject_class, PROP_MINSECTION_PCT, + g_param_spec_uint ("minsection-pct", + "minimum percentage allocation per section", + "The numbers represent a percentage of the average allocation per section (frame)", + 0, 20, DEFAULT_MINSECTION_PCT, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_MAXSECTION_PCT, + g_param_spec_uint ("maxsection-pct", + "maximum percentage allocation per section", + "The numbers represent a percentage of the average allocation per section (frame)", + 200, 800, DEFAULT_MAXSECTION_PCT, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_MIN_QUANTIZER, + g_param_spec_int ("min-quantizer", "Minimum quantizer", + "Minimum (best) quantizer", + 0, 63, DEFAULT_MIN_QUANTIZER, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + 
g_object_class_install_property (gobject_class, PROP_MAX_QUANTIZER, + g_param_spec_int ("max-quantizer", "Maximum quantizer", + "Maximum (worst) quantizer", + 0, 63, DEFAULT_MAX_QUANTIZER, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + g_object_class_install_property (gobject_class, PROP_QUALITY, g_param_spec_double ("quality", "Quality", - "Quality", + "Quality. This parameter sets a constant quantizer.", 0, 10.0, DEFAULT_QUALITY, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); @@ -266,7 +366,7 @@ gst_vp8_enc_class_init (GstVP8EncClass * klass) g_object_class_install_property (gobject_class, PROP_SPEED, g_param_spec_int ("speed", "Speed", "Speed", - 0, 2, DEFAULT_SPEED, + 0, 7, DEFAULT_SPEED, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); g_object_class_install_property (gobject_class, PROP_THREADS, @@ -293,6 +393,54 @@ gst_vp8_enc_class_init (GstVP8EncClass * klass) DEFAULT_AUTO_ALT_REF_FRAMES, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + g_object_class_install_property (gobject_class, PROP_LAG_IN_FRAMES, + g_param_spec_uint ("lag-in-frames", "Max number of frames to lag", + "If set, this value allows the encoder to consume a number of input " + "frames before producing output frames.", + 0, 64, DEFAULT_LAG_IN_FRAMES, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_SHARPNESS, + g_param_spec_int ("sharpness", "Sharpness", + "Sharpness", + 0, 7, DEFAULT_SHARPNESS, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_NOISE_SENSITIVITY, + g_param_spec_int ("noise-sensitivity", "Noise Sensitivity", + "Noise Sensitivity", + 0, 6, DEFAULT_NOISE_SENSITIVITY, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_TUNE, + g_param_spec_enum ("tune", "Tune", + "Tune", + GST_VP8_ENC_TUNE_TYPE, DEFAULT_TUNE, 
+ (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_STATIC_THRESHOLD, + g_param_spec_int ("static-threshold", "Static Threshold", + "Static Threshold", + 0, 1000, DEFAULT_STATIC_THRESHOLD, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_DROP_FRAME, + g_param_spec_int ("drop-frame", "Drop Frame", + "Drop Frame", + 0, 100, DEFAULT_DROP_FRAME, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_RESIZE_ALLOWED, + g_param_spec_boolean ("resize-allowed", "Resize Allowed", + "Resize Allowed", + DEFAULT_RESIZE_ALLOWED, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject_class, PROP_TOKEN_PARTS, + g_param_spec_int ("token-parts", "Token Parts", + "Token Parts", + 0, 3, DEFAULT_TOKEN_PARTS, + (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); GST_DEBUG_CATEGORY_INIT (gst_vp8enc_debug, "vp8enc", 0, "VP8 Encoder"); } @@ -304,20 +452,19 @@ gst_vp8_enc_init (GstVP8Enc * gst_vp8_enc, GstVP8EncClass * klass) GST_DEBUG_OBJECT (gst_vp8_enc, "init"); gst_vp8_enc->bitrate = DEFAULT_BITRATE; + gst_vp8_enc->minsection_pct = DEFAULT_MINSECTION_PCT; + gst_vp8_enc->maxsection_pct = DEFAULT_MAXSECTION_PCT; + gst_vp8_enc->min_quantizer = DEFAULT_MIN_QUANTIZER; + gst_vp8_enc->max_quantizer = DEFAULT_MAX_QUANTIZER; gst_vp8_enc->mode = DEFAULT_MODE; gst_vp8_enc->quality = DEFAULT_QUALITY; gst_vp8_enc->error_resilient = DEFAULT_ERROR_RESILIENT; gst_vp8_enc->max_latency = DEFAULT_MAX_LATENCY; gst_vp8_enc->max_keyframe_distance = DEFAULT_MAX_KEYFRAME_DISTANCE; gst_vp8_enc->multipass_mode = DEFAULT_MULTIPASS_MODE; - gst_vp8_enc->multipass_cache_file = DEFAULT_MULTIPASS_CACHE_FILE; + gst_vp8_enc->multipass_cache_file = g_strdup (DEFAULT_MULTIPASS_CACHE_FILE); gst_vp8_enc->auto_alt_ref_frames = DEFAULT_AUTO_ALT_REF_FRAMES; - - 
/* FIXME: Add sink/src event vmethods */ - gst_vp8_enc->base_sink_event_func = - GST_PAD_EVENTFUNC (GST_BASE_VIDEO_CODEC_SINK_PAD (gst_vp8_enc)); - gst_pad_set_event_function (GST_BASE_VIDEO_CODEC_SINK_PAD (gst_vp8_enc), - gst_vp8_enc_sink_event); + gst_vp8_enc->lag_in_frames = DEFAULT_LAG_IN_FRAMES; } static void @@ -354,6 +501,18 @@ gst_vp8_enc_set_property (GObject * object, guint prop_id, case PROP_MODE: gst_vp8_enc->mode = g_value_get_enum (value); break; + case PROP_MINSECTION_PCT: + gst_vp8_enc->minsection_pct = g_value_get_uint (value); + break; + case PROP_MAXSECTION_PCT: + gst_vp8_enc->maxsection_pct = g_value_get_uint (value); + break; + case PROP_MIN_QUANTIZER: + gst_vp8_enc->min_quantizer = g_value_get_int (value); + break; + case PROP_MAX_QUANTIZER: + gst_vp8_enc->max_quantizer = g_value_get_int (value); + break; case PROP_QUALITY: gst_vp8_enc->quality = g_value_get_double (value); break; @@ -383,6 +542,35 @@ gst_vp8_enc_set_property (GObject * object, guint prop_id, case PROP_AUTO_ALT_REF_FRAMES: gst_vp8_enc->auto_alt_ref_frames = g_value_get_boolean (value); break; + case PROP_LAG_IN_FRAMES: + gst_vp8_enc->lag_in_frames = g_value_get_uint (value); + break; + case PROP_SHARPNESS: + gst_vp8_enc->sharpness = g_value_get_int (value); + break; + case PROP_NOISE_SENSITIVITY: + gst_vp8_enc->noise_sensitivity = g_value_get_int (value); + break; + case PROP_TUNE: +#ifdef HAVE_VP8ENC_TUNING + gst_vp8_enc->tuning = g_value_get_enum (value); +#else + GST_WARNING_OBJECT (gst_vp8_enc, + "The tuning property is unsupported by this libvpx"); +#endif + break; + case PROP_STATIC_THRESHOLD: + gst_vp8_enc->static_threshold = g_value_get_int (value); + break; + case PROP_DROP_FRAME: + gst_vp8_enc->drop_frame = g_value_get_int (value); + break; + case PROP_RESIZE_ALLOWED: + gst_vp8_enc->resize_allowed = g_value_get_boolean (value); + break; + case PROP_TOKEN_PARTS: + gst_vp8_enc->partitions = g_value_get_int (value); + break; default: break; } @@ -404,6 +592,18 @@ 
gst_vp8_enc_get_property (GObject * object, guint prop_id, GValue * value, case PROP_MODE: g_value_set_enum (value, gst_vp8_enc->mode); break; + case PROP_MINSECTION_PCT: + g_value_set_uint (value, gst_vp8_enc->minsection_pct); + break; + case PROP_MAXSECTION_PCT: + g_value_set_uint (value, gst_vp8_enc->maxsection_pct); + break; + case PROP_MIN_QUANTIZER: + g_value_set_int (value, gst_vp8_enc->min_quantizer); + break; + case PROP_MAX_QUANTIZER: + g_value_set_int (value, gst_vp8_enc->max_quantizer); + break; case PROP_QUALITY: g_value_set_double (value, gst_vp8_enc->quality); break; @@ -431,6 +631,35 @@ gst_vp8_enc_get_property (GObject * object, guint prop_id, GValue * value, case PROP_AUTO_ALT_REF_FRAMES: g_value_set_boolean (value, gst_vp8_enc->auto_alt_ref_frames); break; + case PROP_LAG_IN_FRAMES: + g_value_set_uint (value, gst_vp8_enc->lag_in_frames); + break; + case PROP_SHARPNESS: + g_value_set_int (value, gst_vp8_enc->sharpness); + break; + case PROP_NOISE_SENSITIVITY: + g_value_set_int (value, gst_vp8_enc->noise_sensitivity); + break; + case PROP_TUNE: +#ifdef HAVE_VP8ENC_TUNING + g_value_set_enum (value, gst_vp8_enc->tuning); +#else + GST_WARNING_OBJECT (gst_vp8_enc, + "The tuning property is unsupported by this libvpx"); +#endif + break; + case PROP_STATIC_THRESHOLD: + g_value_set_int (value, gst_vp8_enc->static_threshold); + break; + case PROP_DROP_FRAME: + g_value_set_int (value, gst_vp8_enc->drop_frame); + break; + case PROP_RESIZE_ALLOWED: + g_value_set_boolean (value, gst_vp8_enc->resize_allowed); + break; + case PROP_TOKEN_PARTS: + g_value_set_int (value, gst_vp8_enc->partitions); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; @@ -479,25 +708,165 @@ static gboolean gst_vp8_enc_set_format (GstBaseVideoEncoder * base_video_encoder, GstVideoState * state) { + GstVP8Enc *encoder; + vpx_codec_enc_cfg_t cfg; + vpx_codec_err_t status; + vpx_image_t *image; + guint8 *data = NULL; + GstCaps *caps; + gboolean ret; + + 
encoder = GST_VP8_ENC (base_video_encoder); GST_DEBUG_OBJECT (base_video_encoder, "set_format"); - return TRUE; -} + if (encoder->inited) { + GST_DEBUG_OBJECT (base_video_encoder, "refusing renegotiation"); + return FALSE; + } -static GstCaps * -gst_vp8_enc_get_caps (GstBaseVideoEncoder * base_video_encoder) -{ - GstCaps *caps; - const GstVideoState *state; - GstTagList *tags = NULL; - const GstTagList *iface_tags; - GstBuffer *stream_hdr, *vorbiscomment; - guint8 *data; - GstStructure *s; - GValue array = { 0 }; - GValue value = { 0 }; + status = vpx_codec_enc_config_default (&vpx_codec_vp8_cx_algo, &cfg, 0); + if (status != VPX_CODEC_OK) { + GST_ELEMENT_ERROR (encoder, LIBRARY, INIT, + ("Failed to get default encoder configuration"), ("%s", + gst_vpx_error_name (status))); + return FALSE; + } + + cfg.g_w = state->width; + cfg.g_h = state->height; + cfg.g_timebase.num = state->fps_d; + cfg.g_timebase.den = state->fps_n; + + cfg.g_error_resilient = encoder->error_resilient; + cfg.g_lag_in_frames = encoder->max_latency; + cfg.g_threads = encoder->threads; + cfg.rc_end_usage = encoder->mode; + cfg.rc_2pass_vbr_minsection_pct = encoder->minsection_pct; + cfg.rc_2pass_vbr_maxsection_pct = encoder->maxsection_pct; + /* Standalone qp-min do not make any sence, with bitrate=0 and qp-min=1 + * encoder will use only default qp-max=63. Also this will make + * worst possbile quality. 
+ */ + if (encoder->bitrate != DEFAULT_BITRATE || + encoder->max_quantizer != DEFAULT_MAX_QUANTIZER) { + cfg.rc_target_bitrate = encoder->bitrate / 1000; + cfg.rc_min_quantizer = encoder->min_quantizer; + cfg.rc_max_quantizer = encoder->max_quantizer; + } else { + cfg.rc_min_quantizer = (gint) (63 - encoder->quality * 6.2); + cfg.rc_max_quantizer = (gint) (63 - encoder->quality * 6.2); + cfg.rc_target_bitrate = encoder->bitrate; + } + cfg.rc_dropframe_thresh = encoder->drop_frame; + cfg.rc_resize_allowed = encoder->resize_allowed; + + cfg.kf_mode = VPX_KF_AUTO; + cfg.kf_min_dist = 0; + cfg.kf_max_dist = encoder->max_keyframe_distance; + + cfg.g_pass = encoder->multipass_mode; + if (encoder->multipass_mode == VPX_RC_FIRST_PASS) { + encoder->first_pass_cache_content = g_byte_array_sized_new (4096); + } else if (encoder->multipass_mode == VPX_RC_LAST_PASS) { + GError *err = NULL; + + if (!encoder->multipass_cache_file) { + GST_ELEMENT_ERROR (encoder, RESOURCE, OPEN_READ, + ("No multipass cache file provided"), (NULL)); + return FALSE; + } + + if (!g_file_get_contents (encoder->multipass_cache_file, + (gchar **) & encoder->last_pass_cache_content.buf, + &encoder->last_pass_cache_content.sz, &err)) { + GST_ELEMENT_ERROR (encoder, RESOURCE, OPEN_READ, + ("Failed to read multipass cache file provided"), ("%s", + err->message)); + g_error_free (err); + return FALSE; + } + cfg.rc_twopass_stats_in = encoder->last_pass_cache_content; + } + + status = vpx_codec_enc_init (&encoder->encoder, &vpx_codec_vp8_cx_algo, + &cfg, 0); + if (status != VPX_CODEC_OK) { + GST_ELEMENT_ERROR (encoder, LIBRARY, INIT, + ("Failed to initialize encoder"), ("%s", gst_vpx_error_name (status))); + return FALSE; + } + + /* FIXME move this to a set_speed() function */ + status = vpx_codec_control (&encoder->encoder, VP8E_SET_CPUUSED, + (encoder->speed == 0) ? 
0 : (encoder->speed - 1)); + if (status != VPX_CODEC_OK) { + GST_WARNING_OBJECT (encoder, "Failed to set VP8E_SET_CPUUSED to 0: %s", + gst_vpx_error_name (status)); + } + + status = vpx_codec_control (&encoder->encoder, VP8E_SET_NOISE_SENSITIVITY, + encoder->noise_sensitivity); + status = vpx_codec_control (&encoder->encoder, VP8E_SET_SHARPNESS, + encoder->sharpness); + status = vpx_codec_control (&encoder->encoder, VP8E_SET_STATIC_THRESHOLD, + encoder->static_threshold); + status = vpx_codec_control (&encoder->encoder, VP8E_SET_TOKEN_PARTITIONS, + encoder->partitions); +#if 0 + status = vpx_codec_control (&encoder->encoder, VP8E_SET_ARNR_MAXFRAMES, + encoder->arnr_maxframes); + status = vpx_codec_control (&encoder->encoder, VP8E_SET_ARNR_STRENGTH, + encoder->arnr_strength); + status = vpx_codec_control (&encoder->encoder, VP8E_SET_ARNR_TYPE, + encoder->arnr_type); +#endif +#ifdef HAVE_VP8ENC_TUNING + status = vpx_codec_control (&encoder->encoder, VP8E_SET_TUNING, + encoder->tuning); +#endif + + status = + vpx_codec_control (&encoder->encoder, VP8E_SET_ENABLEAUTOALTREF, + (encoder->auto_alt_ref_frames ? 1 : 0)); + if (status != VPX_CODEC_OK) { + GST_WARNING_OBJECT (encoder, + "Failed to set VP8E_ENABLEAUTOALTREF to %d: %s", + (encoder->auto_alt_ref_frames ? 
1 : 0), gst_vpx_error_name (status)); + } + + cfg.g_lag_in_frames = encoder->lag_in_frames; + + gst_base_video_encoder_set_latency (base_video_encoder, 0, + gst_util_uint64_scale (encoder->max_latency, + state->fps_d * GST_SECOND, state->fps_n)); + encoder->inited = TRUE; + + /* prepare cached image buffer setup */ + image = &encoder->image; + memset (image, 0, sizeof (image)); + + image->fmt = VPX_IMG_FMT_I420; + image->bps = 12; + image->x_chroma_shift = image->y_chroma_shift = 1; + image->w = image->d_w = state->width; + image->h = image->d_h = state->height; + + image->stride[VPX_PLANE_Y] = + gst_video_format_get_row_stride (state->format, 0, state->width); + image->stride[VPX_PLANE_U] = + gst_video_format_get_row_stride (state->format, 1, state->width); + image->stride[VPX_PLANE_V] = + gst_video_format_get_row_stride (state->format, 2, state->width); + image->planes[VPX_PLANE_Y] = + data + gst_video_format_get_component_offset (state->format, 0, + state->width, state->height); + image->planes[VPX_PLANE_U] = + data + gst_video_format_get_component_offset (state->format, 1, + state->width, state->height); + image->planes[VPX_PLANE_V] = + data + gst_video_format_get_component_offset (state->format, 2, + state->width, state->height); - state = gst_base_video_encoder_get_state (base_video_encoder); caps = gst_caps_new_simple ("video/x-vp8", "width", G_TYPE_INT, state->width, @@ -506,86 +875,83 @@ gst_vp8_enc_get_caps (GstBaseVideoEncoder * base_video_encoder) state->fps_d, "pixel-aspect-ratio", GST_TYPE_FRACTION, state->par_n, state->par_d, NULL); - - s = gst_caps_get_structure (caps, 0); - - /* put buffers in a fixed list */ - g_value_init (&array, GST_TYPE_ARRAY); - g_value_init (&value, GST_TYPE_BUFFER); - - /* Create Ogg stream-info */ - stream_hdr = gst_buffer_new_and_alloc (26); - data = GST_BUFFER_DATA (stream_hdr); - - GST_WRITE_UINT8 (data, 0x4F); - GST_WRITE_UINT32_BE (data + 1, 0x56503830); /* "VP80" */ - GST_WRITE_UINT8 (data + 5, 0x01); /* stream info 
header */ - GST_WRITE_UINT8 (data + 6, 1); /* Major version 1 */ - GST_WRITE_UINT8 (data + 7, 0); /* Minor version 0 */ - GST_WRITE_UINT16_BE (data + 8, state->width); - GST_WRITE_UINT16_BE (data + 10, state->height); - GST_WRITE_UINT24_BE (data + 12, state->par_n); - GST_WRITE_UINT24_BE (data + 15, state->par_d); - GST_WRITE_UINT32_BE (data + 18, state->fps_n); - GST_WRITE_UINT32_BE (data + 22, state->fps_d); - - GST_BUFFER_FLAG_SET (stream_hdr, GST_BUFFER_FLAG_IN_CAPS); - gst_value_set_buffer (&value, stream_hdr); - gst_value_array_append_value (&array, &value); - g_value_unset (&value); - gst_buffer_unref (stream_hdr); - - iface_tags = - gst_tag_setter_get_tag_list (GST_TAG_SETTER (base_video_encoder)); - if (iface_tags) { - vorbiscomment = - gst_tag_list_to_vorbiscomment_buffer ((iface_tags) ? iface_tags : tags, - (const guint8 *) "OVP80\2 ", 7, - "Encoded with GStreamer vp8enc " PACKAGE_VERSION); - - GST_BUFFER_FLAG_SET (vorbiscomment, GST_BUFFER_FLAG_IN_CAPS); - + { + GstStructure *s; + GstBuffer *stream_hdr, *vorbiscomment; + const GstTagList *iface_tags; + GValue array = { 0, }; + GValue value = { 0, }; + s = gst_caps_get_structure (caps, 0); + + /* put buffers in a fixed list */ + g_value_init (&array, GST_TYPE_ARRAY); g_value_init (&value, GST_TYPE_BUFFER); - gst_value_set_buffer (&value, vorbiscomment); + + /* Create Ogg stream-info */ + stream_hdr = gst_buffer_new_and_alloc (26); + data = GST_BUFFER_DATA (stream_hdr); + + GST_WRITE_UINT8 (data, 0x4F); + GST_WRITE_UINT32_BE (data + 1, 0x56503830); /* "VP80" */ + GST_WRITE_UINT8 (data + 5, 0x01); /* stream info header */ + GST_WRITE_UINT8 (data + 6, 1); /* Major version 1 */ + GST_WRITE_UINT8 (data + 7, 0); /* Minor version 0 */ + GST_WRITE_UINT16_BE (data + 8, state->width); + GST_WRITE_UINT16_BE (data + 10, state->height); + GST_WRITE_UINT24_BE (data + 12, state->par_n); + GST_WRITE_UINT24_BE (data + 15, state->par_d); + GST_WRITE_UINT32_BE (data + 18, state->fps_n); + GST_WRITE_UINT32_BE (data + 22, 
state->fps_d); + + GST_BUFFER_FLAG_SET (stream_hdr, GST_BUFFER_FLAG_IN_CAPS); + gst_value_set_buffer (&value, stream_hdr); gst_value_array_append_value (&array, &value); g_value_unset (&value); - gst_buffer_unref (vorbiscomment); + gst_buffer_unref (stream_hdr); + + iface_tags = + gst_tag_setter_get_tag_list (GST_TAG_SETTER (base_video_encoder)); + if (iface_tags) { + vorbiscomment = + gst_tag_list_to_vorbiscomment_buffer (iface_tags, + (const guint8 *) "OVP80\2 ", 7, + "Encoded with GStreamer vp8enc " PACKAGE_VERSION); + + GST_BUFFER_FLAG_SET (vorbiscomment, GST_BUFFER_FLAG_IN_CAPS); + + g_value_init (&value, GST_TYPE_BUFFER); + gst_value_set_buffer (&value, vorbiscomment); + gst_value_array_append_value (&array, &value); + g_value_unset (&value); + gst_buffer_unref (vorbiscomment); + } + + gst_structure_set_value (s, "streamheader", &array); + g_value_unset (&array); } - gst_structure_set_value (s, "streamheader", &array); - g_value_unset (&array); + ret = gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (encoder), caps); + gst_caps_unref (caps); - return caps; + return ret; } -static gboolean -gst_vp8_enc_finish (GstBaseVideoEncoder * base_video_encoder) +static GstFlowReturn +gst_vp8_enc_process (GstVP8Enc * encoder) { - GstVP8Enc *encoder; - GstVideoFrame *frame; - int flags = 0; - vpx_codec_err_t status; vpx_codec_iter_t iter = NULL; const vpx_codec_cx_pkt_t *pkt; + GstBaseVideoEncoder *base_video_encoder; + GstVP8EncCoderHook *hook; + GstVideoFrame *frame; + GstFlowReturn ret = GST_FLOW_OK; - GST_DEBUG_OBJECT (base_video_encoder, "finish"); - - encoder = GST_VP8_ENC (base_video_encoder); - - status = - vpx_codec_encode (&encoder->encoder, NULL, encoder->n_frames, 1, flags, - 0); - if (status != 0) { - GST_ERROR_OBJECT (encoder, "encode returned %d %s", status, - gst_vpx_error_name (status)); - return FALSE; - } + base_video_encoder = GST_BASE_VIDEO_ENCODER (encoder); pkt = vpx_codec_get_cx_data (&encoder->encoder, &iter); while (pkt != NULL) { GstBuffer 
*buffer; - GstVP8EncCoderHook *hook; - gboolean invisible, keyframe; + gboolean invisible; GST_DEBUG_OBJECT (encoder, "packet %u type %d", (guint) pkt->data.frame.sz, pkt->kind); @@ -614,15 +980,14 @@ gst_vp8_enc_finish (GstBaseVideoEncoder * base_video_encoder) } invisible = (pkt->data.frame.flags & VPX_FRAME_IS_INVISIBLE) != 0; - keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; frame = gst_base_video_encoder_get_oldest_frame (base_video_encoder); g_assert (frame != NULL); + frame->is_sync_point = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; hook = frame->coder_hook; buffer = gst_buffer_new_and_alloc (pkt->data.frame.sz); memcpy (GST_BUFFER_DATA (buffer), pkt->data.frame.buf, pkt->data.frame.sz); - frame->is_sync_point = frame->is_sync_point || keyframe; if (hook->image) g_slice_free (vpx_image_t, hook->image); @@ -632,13 +997,38 @@ gst_vp8_enc_finish (GstBaseVideoEncoder * base_video_encoder) hook->invisible = g_list_append (hook->invisible, buffer); } else { frame->src_buffer = buffer; - gst_base_video_encoder_finish_frame (base_video_encoder, frame); - frame = NULL; + ret = gst_base_video_encoder_finish_frame (base_video_encoder, frame); } pkt = vpx_codec_get_cx_data (&encoder->encoder, &iter); } + return ret; +} + +static GstFlowReturn +gst_vp8_enc_finish (GstBaseVideoEncoder * base_video_encoder) +{ + GstVP8Enc *encoder; + int flags = 0; + vpx_codec_err_t status; + + GST_DEBUG_OBJECT (base_video_encoder, "finish"); + + encoder = GST_VP8_ENC (base_video_encoder); + + status = + vpx_codec_encode (&encoder->encoder, NULL, encoder->n_frames, 1, flags, + 0); + if (status != 0) { + GST_ERROR_OBJECT (encoder, "encode returned %d %s", status, + gst_vpx_error_name (status)); + return GST_FLOW_ERROR; + } + + /* dispatch remaining frames */ + gst_vp8_enc_process (encoder); + if (encoder->multipass_mode == VPX_RC_FIRST_PASS && encoder->multipass_cache_file) { GError *err = NULL; @@ -652,49 +1042,26 @@ gst_vp8_enc_finish (GstBaseVideoEncoder * 
base_video_encoder) } } - return TRUE; + return GST_FLOW_OK; } static vpx_image_t * gst_vp8_enc_buffer_to_image (GstVP8Enc * enc, GstBuffer * buffer) { - vpx_image_t *image = g_slice_new0 (vpx_image_t); + vpx_image_t *image = g_slice_new (vpx_image_t); guint8 *data = GST_BUFFER_DATA (buffer); - GstVideoState *state = &GST_BASE_VIDEO_CODEC (enc)->state; - image->fmt = VPX_IMG_FMT_I420; - image->bps = 12; - image->x_chroma_shift = image->y_chroma_shift = 1; - image->img_data = data; - image->w = image->d_w = state->width; - image->h = image->d_h = state->height; + memcpy (image, &enc->image, sizeof (*image)); - image->stride[VPX_PLANE_Y] = - gst_video_format_get_row_stride (state->format, 0, state->width); - image->stride[VPX_PLANE_U] = - gst_video_format_get_row_stride (state->format, 1, state->width); - image->stride[VPX_PLANE_V] = - gst_video_format_get_row_stride (state->format, 2, state->width); - image->planes[VPX_PLANE_Y] = - data + gst_video_format_get_component_offset (state->format, 0, - state->width, state->height); - image->planes[VPX_PLANE_U] = - data + gst_video_format_get_component_offset (state->format, 1, - state->width, state->height); - image->planes[VPX_PLANE_V] = - data + gst_video_format_get_component_offset (state->format, 2, - state->width, state->height); + image->img_data = data; + image->planes[VPX_PLANE_Y] += (data - (guint8 *) NULL); + image->planes[VPX_PLANE_U] += (data - (guint8 *) NULL); + image->planes[VPX_PLANE_V] += (data - (guint8 *) NULL); return image; } -static const int speed_table[] = { - VPX_DL_BEST_QUALITY, - VPX_DL_GOOD_QUALITY, - VPX_DL_REALTIME, -}; - -static gboolean +static GstFlowReturn gst_vp8_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame) { @@ -702,10 +1069,9 @@ gst_vp8_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, const GstVideoState *state; vpx_codec_err_t status; int flags = 0; - vpx_codec_iter_t iter = NULL; - const vpx_codec_cx_pkt_t *pkt; vpx_image_t *image; 
GstVP8EncCoderHook *hook; + int quality; GST_DEBUG_OBJECT (base_video_encoder, "handle_frame"); @@ -717,105 +1083,22 @@ gst_vp8_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, GST_DEBUG_OBJECT (base_video_encoder, "size %d %d", state->width, state->height); - if (!encoder->inited) { - vpx_codec_enc_cfg_t cfg; - - status = vpx_codec_enc_config_default (&vpx_codec_vp8_cx_algo, &cfg, 0); - if (status != VPX_CODEC_OK) { - GST_ELEMENT_ERROR (encoder, LIBRARY, INIT, - ("Failed to get default encoder configuration"), ("%s", - gst_vpx_error_name (status))); - return FALSE; - } - - cfg.g_w = state->width; - cfg.g_h = state->height; - cfg.g_timebase.num = state->fps_d; - cfg.g_timebase.den = state->fps_n; - - cfg.g_error_resilient = encoder->error_resilient; - cfg.g_lag_in_frames = encoder->max_latency; - cfg.g_threads = encoder->threads; - cfg.rc_end_usage = encoder->mode; - if (encoder->bitrate) { - cfg.rc_target_bitrate = encoder->bitrate / 1000; - } else { - cfg.rc_min_quantizer = 63 - encoder->quality * 5.0; - cfg.rc_max_quantizer = 63 - encoder->quality * 5.0; - cfg.rc_target_bitrate = encoder->bitrate; - } - - cfg.kf_mode = VPX_KF_AUTO; - cfg.kf_min_dist = 0; - cfg.kf_max_dist = encoder->max_keyframe_distance; - - cfg.g_pass = encoder->multipass_mode; - if (encoder->multipass_mode == VPX_RC_FIRST_PASS) { - encoder->first_pass_cache_content = g_byte_array_sized_new (4096); - } else if (encoder->multipass_mode == VPX_RC_LAST_PASS) { - GError *err = NULL; - - - if (!encoder->multipass_cache_file) { - GST_ELEMENT_ERROR (encoder, RESOURCE, OPEN_READ, - ("No multipass cache file provided"), (NULL)); - return GST_FLOW_ERROR; - } - - if (!g_file_get_contents (encoder->multipass_cache_file, - (gchar **) & encoder->last_pass_cache_content.buf, - &encoder->last_pass_cache_content.sz, &err)) { - GST_ELEMENT_ERROR (encoder, RESOURCE, OPEN_READ, - ("Failed to read multipass cache file provided"), ("%s", - err->message)); - g_error_free (err); - return GST_FLOW_ERROR; - 
} - cfg.rc_twopass_stats_in = encoder->last_pass_cache_content; - } - - status = vpx_codec_enc_init (&encoder->encoder, &vpx_codec_vp8_cx_algo, - &cfg, 0); - if (status != VPX_CODEC_OK) { - GST_ELEMENT_ERROR (encoder, LIBRARY, INIT, - ("Failed to initialize encoder"), ("%s", - gst_vpx_error_name (status))); - return GST_FLOW_ERROR; - } - - status = vpx_codec_control (&encoder->encoder, VP8E_SET_CPUUSED, 0); - if (status != VPX_CODEC_OK) { - GST_WARNING_OBJECT (encoder, "Failed to set VP8E_SET_CPUUSED to 0: %s", - gst_vpx_error_name (status)); - } - - status = - vpx_codec_control (&encoder->encoder, VP8E_SET_ENABLEAUTOALTREF, - (encoder->auto_alt_ref_frames ? 1 : 0)); - if (status != VPX_CODEC_OK) { - GST_WARNING_OBJECT (encoder, - "Failed to set VP8E_ENABLEAUTOALTREF to %d: %s", - (encoder->auto_alt_ref_frames ? 1 : 0), gst_vpx_error_name (status)); - } - - gst_base_video_encoder_set_latency (base_video_encoder, 0, - gst_util_uint64_scale (encoder->max_latency, - state->fps_d * GST_SECOND, state->fps_n)); - encoder->inited = TRUE; - } - image = gst_vp8_enc_buffer_to_image (encoder, frame->sink_buffer); hook = g_slice_new0 (GstVP8EncCoderHook); hook->image = image; frame->coder_hook = hook; + frame->coder_hook_destroy_notify = + (GDestroyNotify) gst_vp8_enc_coder_hook_free; - if (encoder->force_keyframe) { + if (frame->force_keyframe) { flags |= VPX_EFLAG_FORCE_KF; } + quality = (encoder->speed == 0) ? 
VPX_DL_BEST_QUALITY : VPX_DL_GOOD_QUALITY; + status = vpx_codec_encode (&encoder->encoder, image, - encoder->n_frames, 1, flags, speed_table[encoder->speed]); + encoder->n_frames, 1, flags, quality); if (status != 0) { GST_ELEMENT_ERROR (encoder, LIBRARY, ENCODE, ("Failed to encode frame"), ("%s", gst_vpx_error_name (status))); @@ -825,62 +1108,7 @@ gst_vp8_enc_handle_frame (GstBaseVideoEncoder * base_video_encoder, return FALSE; } - pkt = vpx_codec_get_cx_data (&encoder->encoder, &iter); - while (pkt != NULL) { - GstBuffer *buffer; - gboolean invisible; - - GST_DEBUG_OBJECT (encoder, "packet %u type %d", (guint) pkt->data.frame.sz, - pkt->kind); - - if (pkt->kind == VPX_CODEC_STATS_PKT - && encoder->multipass_mode == VPX_RC_FIRST_PASS) { - GST_LOG_OBJECT (encoder, "handling STATS packet"); - - g_byte_array_append (encoder->first_pass_cache_content, - pkt->data.twopass_stats.buf, pkt->data.twopass_stats.sz); - - frame = gst_base_video_encoder_get_oldest_frame (base_video_encoder); - if (frame != NULL) { - buffer = gst_buffer_new (); - GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_PREROLL); - frame->src_buffer = buffer; - gst_base_video_encoder_finish_frame (base_video_encoder, frame); - } - - pkt = vpx_codec_get_cx_data (&encoder->encoder, &iter); - continue; - } else if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) { - GST_LOG_OBJECT (encoder, "non frame pkt: %d", pkt->kind); - pkt = vpx_codec_get_cx_data (&encoder->encoder, &iter); - continue; - } - - invisible = (pkt->data.frame.flags & VPX_FRAME_IS_INVISIBLE) != 0; - frame = gst_base_video_encoder_get_oldest_frame (base_video_encoder); - g_assert (frame != NULL); - frame->is_sync_point = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; - hook = frame->coder_hook; - - buffer = gst_buffer_new_and_alloc (pkt->data.frame.sz); - - memcpy (GST_BUFFER_DATA (buffer), pkt->data.frame.buf, pkt->data.frame.sz); - - if (hook->image) - g_slice_free (vpx_image_t, hook->image); - hook->image = NULL; - - if (invisible) { - 
hook->invisible = g_list_append (hook->invisible, buffer); - } else { - frame->src_buffer = buffer; - gst_base_video_encoder_finish_frame (base_video_encoder, frame); - } - - pkt = vpx_codec_get_cx_data (&encoder->encoder, &iter); - } - - return TRUE; + return gst_vp8_enc_process (encoder); } static guint64 @@ -895,13 +1123,6 @@ _to_granulepos (guint64 frame_end_number, guint inv_count, guint keyframe_dist) return granulepos; } -static void -_gst_mini_object_unref0 (GstMiniObject * obj) -{ - if (obj) - gst_mini_object_unref (obj); -} - static GstFlowReturn gst_vp8_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame) @@ -934,6 +1155,8 @@ gst_vp8_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, encoder->keyframe_distance++; } + GST_BUFFER_TIMESTAMP (buf) = GST_BUFFER_TIMESTAMP (frame->src_buffer); + GST_BUFFER_DURATION (buf) = 0; GST_BUFFER_OFFSET_END (buf) = _to_granulepos (frame->presentation_frame_number + 1, inv_count, encoder->keyframe_distance); @@ -941,7 +1164,8 @@ gst_vp8_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, gst_util_uint64_scale (frame->presentation_frame_number + 1, GST_SECOND * state->fps_d, state->fps_n); - gst_buffer_set_caps (buf, GST_BASE_VIDEO_CODEC (base_video_encoder)->caps); + gst_buffer_set_caps (buf, + GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder))); ret = gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), buf); if (ret != GST_FLOW_OK) { @@ -974,21 +1198,13 @@ gst_vp8_enc_shape_output (GstBaseVideoEncoder * base_video_encoder, } done: - if (hook) { - g_list_foreach (hook->invisible, (GFunc) _gst_mini_object_unref0, NULL); - g_list_free (hook->invisible); - g_slice_free (GstVP8EncCoderHook, hook); - frame->coder_hook = NULL; - } - return ret; } static gboolean -gst_vp8_enc_sink_event (GstPad * pad, GstEvent * event) +gst_vp8_enc_sink_event (GstBaseVideoEncoder * benc, GstEvent * event) { - GstVP8Enc *enc = GST_VP8_ENC (gst_pad_get_parent (pad)); - 
gboolean ret; + GstVP8Enc *enc = GST_VP8_ENC (benc); if (GST_EVENT_TYPE (event) == GST_EVENT_TAG) { GstTagList *list; @@ -999,10 +1215,8 @@ gst_vp8_enc_sink_event (GstPad * pad, GstEvent * event) gst_tag_setter_merge_tags (setter, list, mode); } - ret = enc->base_sink_event_func (pad, event); - gst_object_unref (enc); - - return ret; + /* just peeked, baseclass handles the rest */ + return FALSE; } #endif /* HAVE_VP8_ENCODER */ diff --git a/ext/vp8/gstvp8enc.h b/ext/vp8/gstvp8enc.h index 0a21647..3b01512 100644 --- a/ext/vp8/gstvp8enc.h +++ b/ext/vp8/gstvp8enc.h @@ -60,6 +60,10 @@ struct _GstVP8Enc /* properties */ int bitrate; enum vpx_rc_mode mode; + unsigned int minsection_pct; + unsigned int maxsection_pct; + int min_quantizer; + int max_quantizer; double quality; gboolean error_resilient; int max_latency; @@ -71,16 +75,24 @@ struct _GstVP8Enc GByteArray *first_pass_cache_content; vpx_fixed_buf_t last_pass_cache_content; gboolean auto_alt_ref_frames; + unsigned int lag_in_frames; + int sharpness; + int noise_sensitivity; +#ifdef HAVE_VP8ENC_TUNING + vp8e_tuning tuning; +#endif + int static_threshold; + gboolean drop_frame; + gboolean resize_allowed; + gboolean partitions; /* state */ - gboolean force_keyframe; gboolean inited; + vpx_image_t image; + int n_frames; int keyframe_distance; - - /* FIXME: Get a event vfunc in BaseVideoEncoder */ - GstPadEventFunction base_sink_event_func; }; struct _GstVP8EncClass diff --git a/gst-libs/gst/Makefile.am b/gst-libs/gst/Makefile.am index f58086c..823e2e3 100644 --- a/gst-libs/gst/Makefile.am +++ b/gst-libs/gst/Makefile.am @@ -2,8 +2,7 @@ if BUILD_EXPERIMENTAL EXPERIMENTAL_LIBS=basecamerabinsrc endif -SUBDIRS = interfaces signalprocessor video $(EXPERIMENTAL_LIBS) +SUBDIRS = interfaces signalprocessor video codecparsers $(EXPERIMENTAL_LIBS) noinst_HEADERS = gst-i18n-plugin.h gettext.h DIST_SUBDIRS = interfaces signalprocessor video basecamerabinsrc - diff --git a/gst-libs/gst/basecamerabinsrc/gstbasecamerasrc.c 
b/gst-libs/gst/basecamerabinsrc/gstbasecamerasrc.c index b2ae992..df22e18 100644 --- a/gst-libs/gst/basecamerabinsrc/gstbasecamerasrc.c +++ b/gst-libs/gst/basecamerabinsrc/gstbasecamerasrc.c @@ -45,7 +45,7 @@ * directly in the subclass without extra elements. * * The src will receive the capture mode from #GstCameraBin2 on the - * #GstBaseCameraSrc:mode property. Possible capture modes are defined in + * #GstBaseCameraBinSrc:mode property. Possible capture modes are defined in * #GstCameraBinMode. */ @@ -84,7 +84,8 @@ static guint basecamerasrc_signals[LAST_SIGNAL]; GST_DEBUG_CATEGORY (base_camera_src_debug); #define GST_CAT_DEFAULT base_camera_src_debug -GST_BOILERPLATE (GstBaseCameraSrc, gst_base_camera_src, GstBin, GST_TYPE_BIN); +GST_BOILERPLATE (GstBaseCameraBinSrc, gst_base_camera_src, GstBin, + GST_TYPE_BIN); static GstStaticPadTemplate vfsrc_template = GST_STATIC_PAD_TEMPLATE (GST_BASE_CAMERA_SRC_VIEWFINDER_PAD_NAME, @@ -118,7 +119,7 @@ GST_STATIC_PAD_TEMPLATE (GST_BASE_CAMERA_SRC_VIDEO_PAD_NAME, * returns NULL. */ GstPhotography * -gst_base_camera_src_get_photography (GstBaseCameraSrc * self) +gst_base_camera_src_get_photography (GstBaseCameraBinSrc * self) { GstElement *elem; @@ -144,7 +145,7 @@ gst_base_camera_src_get_photography (GstBaseCameraSrc * self) * returns NULL. */ GstColorBalance * -gst_base_camera_src_get_color_balance (GstBaseCameraSrc * self) +gst_base_camera_src_get_color_balance (GstBaseCameraBinSrc * self) { GstElement *elem; @@ -169,9 +170,9 @@ gst_base_camera_src_get_color_balance (GstBaseCameraSrc * self) * Set the chosen #GstCameraBinMode capture mode. 
*/ gboolean -gst_base_camera_src_set_mode (GstBaseCameraSrc * self, GstCameraBinMode mode) +gst_base_camera_src_set_mode (GstBaseCameraBinSrc * self, GstCameraBinMode mode) { - GstBaseCameraSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); + GstBaseCameraBinSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); g_return_val_if_fail (bclass->set_mode, FALSE); @@ -189,9 +190,9 @@ gst_base_camera_src_set_mode (GstBaseCameraSrc * self, GstCameraBinMode mode) * Apply zoom configured to camerabin to capture. */ void -gst_base_camera_src_setup_zoom (GstBaseCameraSrc * self) +gst_base_camera_src_setup_zoom (GstBaseCameraBinSrc * self) { - GstBaseCameraSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); + GstBaseCameraBinSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); g_return_if_fail (self->zoom); g_return_if_fail (bclass->set_zoom); @@ -207,10 +208,10 @@ gst_base_camera_src_setup_zoom (GstBaseCameraSrc * self) * Apply preview caps to preview pipeline and to video source. 
*/ void -gst_base_camera_src_setup_preview (GstBaseCameraSrc * self, +gst_base_camera_src_setup_preview (GstBaseCameraBinSrc * self, GstCaps * preview_caps) { - GstBaseCameraSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); + GstBaseCameraBinSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); if (self->preview_pipeline) { GST_DEBUG_OBJECT (self, @@ -231,9 +232,9 @@ gst_base_camera_src_setup_preview (GstBaseCameraSrc * self, * Returns: caps object from videosrc */ GstCaps * -gst_base_camera_src_get_allowed_input_caps (GstBaseCameraSrc * self) +gst_base_camera_src_get_allowed_input_caps (GstBaseCameraBinSrc * self) { - GstBaseCameraSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); + GstBaseCameraBinSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); g_return_val_if_fail (bclass->get_allowed_input_caps, NULL); @@ -241,9 +242,9 @@ gst_base_camera_src_get_allowed_input_caps (GstBaseCameraSrc * self) } static void -gst_base_camera_src_start_capture (GstBaseCameraSrc * src) +gst_base_camera_src_start_capture (GstBaseCameraBinSrc * src) { - GstBaseCameraSrcClass *klass = GST_BASE_CAMERA_SRC_GET_CLASS (src); + GstBaseCameraBinSrcClass *klass = GST_BASE_CAMERA_SRC_GET_CLASS (src); g_return_if_fail (klass->start_capture != NULL); @@ -272,9 +273,9 @@ gst_base_camera_src_start_capture (GstBaseCameraSrc * src) } static void -gst_base_camera_src_stop_capture (GstBaseCameraSrc * src) +gst_base_camera_src_stop_capture (GstBaseCameraBinSrc * src) { - GstBaseCameraSrcClass *klass = GST_BASE_CAMERA_SRC_GET_CLASS (src); + GstBaseCameraBinSrcClass *klass = GST_BASE_CAMERA_SRC_GET_CLASS (src); g_return_if_fail (klass->stop_capture != NULL); @@ -289,7 +290,7 @@ gst_base_camera_src_stop_capture (GstBaseCameraSrc * src) } void -gst_base_camera_src_finish_capture (GstBaseCameraSrc * self) +gst_base_camera_src_finish_capture (GstBaseCameraBinSrc * self) { GST_DEBUG_OBJECT (self, "Finishing capture"); g_return_if_fail (self->capturing); @@ -300,7 +301,7 @@ 
gst_base_camera_src_finish_capture (GstBaseCameraSrc * self) static void gst_base_camera_src_dispose (GObject * object) { - GstBaseCameraSrc *src = GST_BASE_CAMERA_SRC_CAST (object); + GstBaseCameraBinSrc *src = GST_BASE_CAMERA_SRC_CAST (object); g_mutex_free (src->capturing_mutex); @@ -321,7 +322,7 @@ gst_base_camera_src_dispose (GObject * object) } static void -gst_base_camera_src_finalize (GstBaseCameraSrc * self) +gst_base_camera_src_finalize (GstBaseCameraBinSrc * self) { G_OBJECT_CLASS (parent_class)->finalize ((GObject *) (self)); } @@ -330,7 +331,7 @@ static void gst_base_camera_src_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { - GstBaseCameraSrc *self = GST_BASE_CAMERA_SRC (object); + GstBaseCameraBinSrc *self = GST_BASE_CAMERA_SRC (object); switch (prop_id) { case PROP_MODE: @@ -380,7 +381,7 @@ static void gst_base_camera_src_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { - GstBaseCameraSrc *self = GST_BASE_CAMERA_SRC (object); + GstBaseCameraBinSrc *self = GST_BASE_CAMERA_SRC (object); switch (prop_id) { case PROP_MODE: @@ -413,9 +414,9 @@ gst_base_camera_src_get_property (GObject * object, } static gboolean -construct_pipeline (GstBaseCameraSrc * self) +construct_pipeline (GstBaseCameraBinSrc * self) { - GstBaseCameraSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); + GstBaseCameraBinSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); if (bclass->construct_pipeline) { if (!bclass->construct_pipeline (self)) { @@ -428,9 +429,9 @@ construct_pipeline (GstBaseCameraSrc * self) } static gboolean -setup_pipeline (GstBaseCameraSrc * self) +setup_pipeline (GstBaseCameraBinSrc * self) { - GstBaseCameraSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); + GstBaseCameraBinSrcClass *bclass = GST_BASE_CAMERA_SRC_GET_CLASS (self); if (bclass->setup_pipeline) return bclass->setup_pipeline (self); return TRUE; @@ -441,7 +442,7 @@ gst_base_camera_src_change_state 
(GstElement * element, GstStateChange transition) { GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; - GstBaseCameraSrc *self = GST_BASE_CAMERA_SRC (element); + GstBaseCameraBinSrc *self = GST_BASE_CAMERA_SRC (element); GST_DEBUG_OBJECT (self, "%d -> %d", GST_STATE_TRANSITION_CURRENT (transition), @@ -519,7 +520,7 @@ gst_base_camera_src_base_init (gpointer g_class) } static void -gst_base_camera_src_class_init (GstBaseCameraSrcClass * klass) +gst_base_camera_src_class_init (GstBaseCameraBinSrcClass * klass) { GObjectClass *gobject_class; GstElementClass *gstelement_class; @@ -550,7 +551,7 @@ gst_base_camera_src_class_init (GstBaseCameraSrcClass * klass) MAX_ZOOM, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); /** - * GstBaseCameraSrc:post-previews: + * GstBaseCameraBinSrc:post-previews: * * When %TRUE, preview images should be posted to the bus when * captures are made @@ -571,7 +572,7 @@ gst_base_camera_src_class_init (GstBaseCameraSrcClass * klass) GST_TYPE_ELEMENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); /** - * GstBaseCameraSrc:ready-for-capture: + * GstBaseCameraBinSrc:ready-for-capture: * * When TRUE new capture can be prepared. If FALSE capturing is ongoing * and starting a new capture immediately is not possible. 
@@ -592,14 +593,14 @@ gst_base_camera_src_class_init (GstBaseCameraSrcClass * klass) g_signal_new ("start-capture", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION, - G_STRUCT_OFFSET (GstBaseCameraSrcClass, private_start_capture), + G_STRUCT_OFFSET (GstBaseCameraBinSrcClass, private_start_capture), NULL, NULL, g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0); basecamerasrc_signals[STOP_CAPTURE_SIGNAL] = g_signal_new ("stop-capture", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION, - G_STRUCT_OFFSET (GstBaseCameraSrcClass, private_stop_capture), + G_STRUCT_OFFSET (GstBaseCameraBinSrcClass, private_stop_capture), NULL, NULL, g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0); /* TODO these should be moved to a private struct @@ -613,8 +614,8 @@ gst_base_camera_src_class_init (GstBaseCameraSrcClass * klass) } static void -gst_base_camera_src_init (GstBaseCameraSrc * self, - GstBaseCameraSrcClass * klass) +gst_base_camera_src_init (GstBaseCameraBinSrc * self, + GstBaseCameraBinSrcClass * klass) { self->width = DEFAULT_WIDTH; self->height = DEFAULT_HEIGHT; @@ -629,7 +630,7 @@ gst_base_camera_src_init (GstBaseCameraSrc * self, } void -gst_base_camera_src_post_preview (GstBaseCameraSrc * self, GstBuffer * buf) +gst_base_camera_src_post_preview (GstBaseCameraBinSrc * self, GstBuffer * buf) { if (self->post_preview) { gst_camerabin_preview_pipeline_post (self->preview_pipeline, buf); diff --git a/gst-libs/gst/basecamerabinsrc/gstbasecamerasrc.h b/gst-libs/gst/basecamerabinsrc/gstbasecamerasrc.h index 1c412e4..505e2a5 100644 --- a/gst-libs/gst/basecamerabinsrc/gstbasecamerasrc.h +++ b/gst-libs/gst/basecamerabinsrc/gstbasecamerasrc.h @@ -38,21 +38,21 @@ G_BEGIN_DECLS #define GST_TYPE_BASE_CAMERA_SRC \ (gst_base_camera_src_get_type()) #define GST_BASE_CAMERA_SRC(obj) \ - (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_CAMERA_SRC,GstBaseCameraSrc)) + (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_CAMERA_SRC,GstBaseCameraBinSrc)) #define 
GST_BASE_CAMERA_SRC_GET_CLASS(obj) \ - (G_TYPE_INSTANCE_GET_CLASS ((obj), GST_TYPE_BASE_CAMERA_SRC, GstBaseCameraSrcClass)) + (G_TYPE_INSTANCE_GET_CLASS ((obj), GST_TYPE_BASE_CAMERA_SRC, GstBaseCameraBinSrcClass)) #define GST_BASE_CAMERA_SRC_CLASS(klass) \ - (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_CAMERA_SRC,GstBaseCameraSrcClass)) + (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_CAMERA_SRC,GstBaseCameraBinSrcClass)) #define GST_IS_BASE_CAMERA_SRC(obj) \ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_BASE_CAMERA_SRC)) #define GST_IS_BASE_CAMERA_SRC_CLASS(klass) \ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_CAMERA_SRC)) #define GST_BASE_CAMERA_SRC_CAST(obj) \ - ((GstBaseCameraSrc *) (obj)) + ((GstBaseCameraBinSrc *) (obj)) GType gst_base_camera_src_get_type (void); -typedef struct _GstBaseCameraSrc GstBaseCameraSrc; -typedef struct _GstBaseCameraSrcClass GstBaseCameraSrcClass; +typedef struct _GstBaseCameraBinSrc GstBaseCameraBinSrc; +typedef struct _GstBaseCameraBinSrcClass GstBaseCameraBinSrcClass; #define GST_BASE_CAMERA_SRC_VIEWFINDER_PAD_NAME "vfsrc" #define GST_BASE_CAMERA_SRC_IMAGE_PAD_NAME "imgsrc" @@ -61,9 +61,9 @@ typedef struct _GstBaseCameraSrcClass GstBaseCameraSrcClass; #define GST_BASE_CAMERA_SRC_PREVIEW_MESSAGE_NAME "preview-image" /** - * GstBaseCameraSrc: + * GstBaseCameraBinSrc: */ -struct _GstBaseCameraSrc +struct _GstBaseCameraBinSrc { GstBin parent; @@ -91,40 +91,40 @@ struct _GstBaseCameraSrc /** - * GstBaseCameraSrcClass: + * GstBaseCameraBinSrcClass: * @construct_pipeline: construct pipeline must be implemented by derived class * @setup_pipeline: configure pipeline for the chosen settings * @set_zoom: set the zoom * @set_mode: set the mode */ -struct _GstBaseCameraSrcClass +struct _GstBaseCameraBinSrcClass { GstBinClass parent; /* construct pipeline must be implemented by derived class */ - gboolean (*construct_pipeline) (GstBaseCameraSrc *self); + gboolean (*construct_pipeline) (GstBaseCameraBinSrc *self); /* optional */ - gboolean 
(*setup_pipeline) (GstBaseCameraSrc *self); + gboolean (*setup_pipeline) (GstBaseCameraBinSrc *self); /* set the zoom */ - void (*set_zoom) (GstBaseCameraSrc *self, gfloat zoom); + void (*set_zoom) (GstBaseCameraBinSrc *self, gfloat zoom); /* set the mode */ - gboolean (*set_mode) (GstBaseCameraSrc *self, + gboolean (*set_mode) (GstBaseCameraBinSrc *self, GstCameraBinMode mode); /* set preview caps */ - gboolean (*set_preview) (GstBaseCameraSrc *self, + gboolean (*set_preview) (GstBaseCameraBinSrc *self, GstCaps *preview_caps); /* */ - GstCaps * (*get_allowed_input_caps) (GstBaseCameraSrc * self); + GstCaps * (*get_allowed_input_caps) (GstBaseCameraBinSrc * self); - void (*private_start_capture) (GstBaseCameraSrc * src); - void (*private_stop_capture) (GstBaseCameraSrc * src); - gboolean (*start_capture) (GstBaseCameraSrc * src); - void (*stop_capture) (GstBaseCameraSrc * src); + void (*private_start_capture) (GstBaseCameraBinSrc * src); + void (*private_stop_capture) (GstBaseCameraBinSrc * src); + gboolean (*start_capture) (GstBaseCameraBinSrc * src); + void (*stop_capture) (GstBaseCameraBinSrc * src); gpointer _gst_reserved[GST_PADDING_LARGE]; }; @@ -134,17 +134,17 @@ struct _GstBaseCameraSrcClass #define MAX_ZOOM 10.0f #define ZOOM_1X MIN_ZOOM -GstPhotography * gst_base_camera_src_get_photography (GstBaseCameraSrc *self); -GstColorBalance * gst_base_camera_src_get_color_balance (GstBaseCameraSrc *self); +GstPhotography * gst_base_camera_src_get_photography (GstBaseCameraBinSrc *self); +GstColorBalance * gst_base_camera_src_get_color_balance (GstBaseCameraBinSrc *self); -gboolean gst_base_camera_src_set_mode (GstBaseCameraSrc *self, GstCameraBinMode mode); -void gst_base_camera_src_setup_zoom (GstBaseCameraSrc * self); -void gst_base_camera_src_setup_preview (GstBaseCameraSrc * self, GstCaps * preview_caps); -GstCaps * gst_base_camera_src_get_allowed_input_caps (GstBaseCameraSrc * self); -void gst_base_camera_src_finish_capture (GstBaseCameraSrc *self); +gboolean 
gst_base_camera_src_set_mode (GstBaseCameraBinSrc *self, GstCameraBinMode mode); +void gst_base_camera_src_setup_zoom (GstBaseCameraBinSrc * self); +void gst_base_camera_src_setup_preview (GstBaseCameraBinSrc * self, GstCaps * preview_caps); +GstCaps * gst_base_camera_src_get_allowed_input_caps (GstBaseCameraBinSrc * self); +void gst_base_camera_src_finish_capture (GstBaseCameraBinSrc *self); -void gst_base_camera_src_post_preview (GstBaseCameraSrc *self, GstBuffer * buf); +void gst_base_camera_src_post_preview (GstBaseCameraBinSrc *self, GstBuffer * buf); // XXX add methods to get/set img capture and vid capture caps.. #endif /* __GST_BASE_CAMERA_SRC_H__ */ diff --git a/gst-libs/gst/codecparsers/Makefile.am b/gst-libs/gst/codecparsers/Makefile.am new file mode 100644 index 0000000..a29d406 --- /dev/null +++ b/gst-libs/gst/codecparsers/Makefile.am @@ -0,0 +1,44 @@ +lib_LTLIBRARIES = libgstcodecparsers-@GST_MAJORMINOR@.la + +libgstcodecparsers_@GST_MAJORMINOR@_la_SOURCES = \ + parserutils.c \ + gsth264parser.c \ + gstvc1parser.c + +libgstcodecparsers_@GST_MAJORMINOR@includedir = \ + $(includedir)/gstreamer-@GST_MAJORMINOR@/gst/codecparsers + +libgstcodecparsers_@GST_MAJORMINOR@include_HEADERS = \ + parserutils.h \ + gsth264parser.h \ + gstvc1parser.h + +libgstcodecparsers_@GST_MAJORMINOR@_la_CFLAGS = \ + $(GST_PLUGINS_BAD_CFLAGS) \ + -DGST_USE_UNSTABLE_API \ + $(GST_CFLAGS) + +libgstcodecparsers_@GST_MAJORMINOR@_la_LIBADD = \ + $(GST_BASE_LIBS) \ + $(GST_LIBS) + +libgstcodecparsers_@GST_MAJORMINOR@_la_LDFLAGS = \ + $(GST_LIB_LDFLAGS) \ + $(GST_ALL_LDFLAGS) \ + $(GST_LT_LDFLAGS) + +Android.mk: $(BUILT_SOURCES) Makefile.am + androgenizer -:PROJECT libgstcodecparsers -:STATIC libgstcodecparsers-@GST_MAJORMINOR@ \ + -:TAGS eng debug \ + -:REL_TOP $(top_srcdir) -:ABS_TOP $(abs_top_srcdir) \ + -:SOURCES $(libgstcodecparsers_@GST_MAJORMINOR@_la_SOURCES) \ + $(built_sources) \ + -:CFLAGS $(DEFS) $(libgstcodecparsers_@GST_MAJORMINOR@_la_CFLAGS) \ + -:LDFLAGS 
$(libgstcodecparsers_@GST_MAJORMINOR@_la_LDFLAGS) \ + $(libgstcodecparsers@GST_MAJORMINOR@_la_LIBADD) \ + -ldl \ + -:HEADER_TARGET gstreamer-@GST_MAJORMINOR@/gst/codecparsers \ + -:HEADERS $(libgstcodecparsersinclude_HEADERS) \ + $(built_headers) \ + -:PASSTHROUGH LOCAL_ARM_MODE:=arm \ + > $@ diff --git a/gst-libs/gst/codecparsers/gsth264parser.c b/gst-libs/gst/codecparsers/gsth264parser.c new file mode 100644 index 0000000..572a28d --- /dev/null +++ b/gst-libs/gst/codecparsers/gsth264parser.c @@ -0,0 +1,1955 @@ +/* Gstreamer + * Copyright (C) <2011> Intel Corporation + * Copyright (C) <2011> Collabora Ltd. + * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> + * + * Some bits C-c,C-v'ed and s/4/3 from h264parse and videoparsers/h264parse.c: + * Copyright (C) <2010> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> + * Copyright (C) <2010> Collabora Multimedia + * Copyright (C) <2010> Nokia Corporation + * + * (C) 2005 Michal Benes <michal.benes@itonis.tv> + * (C) 2008 Wim Taymans <wim.taymans@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +/** + * SECTION:gsth264parser + * @short_description: Convenience library for h264 video + * bitstream parsing. + * + * It offers you bitstream parsing in AVC mode or not. 
To identify Nals in a bitstream and + * parse its headers, you should call: + * <itemizedlist> + * <listitem> + * #gst_h264_parser_identify_nalu to identify the following nalu in not AVC bitstreams + * </listitem> + * <listitem> + * #gst_h264_parser_identify_nalu_avc to identify the nalu in AVC bitstreams + * </listitem> + * </itemizedlist> + * + * Then, depending on the #GstH264NalUnitType of the newly parsed #GstH264NalUnit, you should + * call the differents functions to parse the structure: + * <itemizedlist> + * <listitem> + * From #GST_H264_NAL_SLICE to #GST_H264_NAL_SLICE_IDR: #gst_h264_parser_parse_slice_hdr + * </listitem> + * <listitem> + * #GST_H264_NAL_SEI: #gst_h264_parser_parse_sei + * </listitem> + * <listitem> + * #GST_H264_NAL_SPS: #gst_h264_parser_parse_sps + * </listitem> + * <listitem> + * #GST_H264_NAL_PPS: #gst_h264_parser_parse_pps + * </listitem> + * <listitem> + * Any other: #gst_h264_parser_parse_nal + * </listitem> + * </itemizedlist> + * + * Note: You should always call gst_h264_parser_parse_nal if you don't actually need + * #GstH264NalUnitType to be parsed for your personnal use, in order to guarantee that the + * #GstH264NalParser is always up to date. 
+ * + * For more details about the structures, look at the ITU-T H.264 and ISO/IEC 14496-10 – MPEG-4 + * Part 10 specifications, you can download them from: + * + * <itemizedlist> + * <listitem> + * ITU-T H.264: http://www.itu.int/rec/T-REC-H.264 + * </listitem> + * <listitem> + * ISO/IEC 14496-10: http://www.iso.org/iso/iso_catalogue/catalogue_tc/catalogue_detail.htm?csnumber=56538 + * </listitem> + * </itemizedlist> + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "gsth264parser.h" + +#include <gst/base/gstbytereader.h> +#include <gst/base/gstbitreader.h> +#include <string.h> + +GST_DEBUG_CATEGORY (h264_parser_debug); +#define GST_CAT_DEFAULT h264_parser_debug + +/**** Default scaling_lists according to Table 7-2 *****/ +static const guint8 default_4x4_intra[16] = { + 6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32, + 32, 37, 37, 42 +}; + +static const guint8 default_4x4_inter[16] = { + 10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27, + 27, 30, 30, 34 +}; + +static const guint8 default_8x8_intra[64] = { + 6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18, + 18, 18, 18, 23, 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, + 27, 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31, 31, 33, + 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42 +}; + +static const guint8 default_8x8_inter[64] = { + 9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19, + 19, 19, 19, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, + 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27, 27, 28, + 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35 +}; + +static const guint8 zigzag_8x8[64] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 +}; + +static const guint8 zigzag_4x4[16] = { + 0, 1, 4, 8, + 5, 2, 3, 6, + 
9, 12, 13, 10, + 7, 11, 14, 15, +}; + +/* Compute Ceil(Log2(v)) */ +/* Derived from branchless code for integer log2(v) from: + <http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog> */ +static guint +ceil_log2 (guint32 v) +{ + guint r, shift; + + v--; + r = (v > 0xFFFF) << 4; + v >>= r; + shift = (v > 0xFF) << 3; + v >>= shift; + r |= shift; + shift = (v > 0xF) << 2; + v >>= shift; + r |= shift; + shift = (v > 0x3) << 1; + v >>= shift; + r |= shift; + r |= (v >> 1); + return r + 1; +} + +/****** Nal parser ******/ + +typedef struct +{ + const guint8 *data; + guint size; + + guint byte; /* Byte position */ + guint bits_in_cache; /* bitpos in the cache of next bit */ + guint8 first_byte; + guint64 cache; /* cached bytes */ +} NalReader; + +static void +nal_reader_init (NalReader * nr, const guint8 * data, guint size) +{ + nr->data = data; + nr->size = size; + + nr->byte = 0; + nr->bits_in_cache = 0; + /* fill with something other than 0 to detect emulation prevention bytes */ + nr->first_byte = 0xff; + nr->cache = 0xff; +} + +static gboolean +nal_reader_read (NalReader * nr, guint nbits) +{ + if (G_UNLIKELY (nr->byte * 8 + (nbits - nr->bits_in_cache) > nr->size * 8)) { + GST_DEBUG ("Can not read %u bits, bits in cache %u, Byte * 8 %u, size in " + "bits %u", nbits, nr->bits_in_cache, nr->byte * 8, nr->size * 8); + return FALSE; + } + + while (nr->bits_in_cache < nbits) { + guint8 byte; + gboolean check_three_byte; + + check_three_byte = TRUE; + next_byte: + if (G_UNLIKELY (nr->byte >= nr->size)) + return FALSE; + + byte = nr->data[nr->byte++]; + + /* check if the byte is a emulation_prevention_three_byte */ + if (check_three_byte && byte == 0x03 && nr->first_byte == 0x00 && + ((nr->cache & 0xff) == 0)) { + /* next byte goes unconditionally to the cache, even if it's 0x03 */ + check_three_byte = FALSE; + goto next_byte; + } + nr->cache = (nr->cache << 8) | nr->first_byte; + nr->first_byte = byte; + nr->bits_in_cache += 8; + } + + return TRUE; +} + +static 
inline gboolean +nal_reader_skip (NalReader * nr, guint nbits) +{ + g_return_val_if_fail (nr != NULL, FALSE); + + if (G_UNLIKELY (!nal_reader_read (nr, nbits))) + return FALSE; + + nr->bits_in_cache -= nbits; + + return TRUE; +} + +static inline gboolean +nal_reader_skip_to_byte (NalReader * nr) +{ + g_return_val_if_fail (nr != NULL, FALSE); + + if (nr->bits_in_cache == 0) { + if (G_LIKELY ((nr->size - nr->byte) > 0)) + nr->byte++; + else + return FALSE; + } + + nr->bits_in_cache = 0; + + return TRUE; +} + +static inline guint +nal_reader_get_pos (const NalReader * nr) +{ + return nr->byte * 8 - nr->bits_in_cache; +} + +static inline guint +nal_reader_get_remaining (const NalReader * nr) +{ + return (nr->size - nr->byte) * 8 + nr->bits_in_cache; +} + +#define GST_NAL_READER_READ_BITS(bits) \ +static gboolean \ +nal_reader_get_bits_uint##bits (NalReader *nr, guint##bits *val, guint nbits) \ +{ \ + guint shift; \ + \ + g_return_val_if_fail (nr != NULL, FALSE); \ + g_return_val_if_fail (val != NULL, FALSE); \ + g_return_val_if_fail (nbits <= bits, FALSE); \ + \ + if (!nal_reader_read (nr, nbits)) \ + return FALSE; \ + \ + /* bring the required bits down and truncate */ \ + shift = nr->bits_in_cache - nbits; \ + *val = nr->first_byte >> shift; \ + \ + *val |= nr->cache << (8 - shift); \ + /* mask out required bits */ \ + if (nbits < bits) \ + *val &= ((guint##bits)1 << nbits) - 1; \ + \ + nr->bits_in_cache = shift; \ + \ + return TRUE; \ +} \ + +GST_NAL_READER_READ_BITS (8); +GST_NAL_READER_READ_BITS (16); +GST_NAL_READER_READ_BITS (32); + +#define GST_NAL_READER_PEAK_BITS(bits) \ +static gboolean \ +nal_reader_peek_bits_uint##bits (const NalReader *nr, guint##bits *val, guint nbits) \ +{ \ + NalReader tmp; \ + \ + g_return_val_if_fail (nr != NULL, FALSE); \ + tmp = *nr; \ + return nal_reader_get_bits_uint##bits (&tmp, val, nbits); \ +} + +GST_NAL_READER_PEAK_BITS (8); + +static gboolean +nal_reader_get_ue (NalReader * nr, guint32 * val) +{ + guint i = 0; + guint8 bit; 
+ guint32 value; + + if (G_UNLIKELY (!nal_reader_get_bits_uint8 (nr, &bit, 1))) { + + return FALSE; + } + + while (bit == 0) { + i++; + if G_UNLIKELY + ((!nal_reader_get_bits_uint8 (nr, &bit, 1))) + return FALSE; + } + + g_return_val_if_fail (i <= 32, FALSE); + + if (G_UNLIKELY (!nal_reader_get_bits_uint32 (nr, &value, i))) + return FALSE; + + *val = (1 << i) - 1 + value; + + return TRUE; +} + +static gboolean +nal_reader_get_se (NalReader * nr, gint32 * val) +{ + guint32 value; + + if (G_UNLIKELY (!nal_reader_get_ue (nr, &value))) + return FALSE; + + if (value % 2) + *val = (value / 2) + 1; + else + *val = -(value / 2); + + return TRUE; +} + +#define CHECK_ALLOWED(val, min, max) { \ + if (val < min || val > max) { \ + GST_WARNING ("value not in allowed range. value: %d, range %d-%d", \ + val, min, max); \ + goto error; \ + } \ +} + +#define READ_UINT8(nr, val, nbits) { \ + if (!nal_reader_get_bits_uint8 (nr, &val, nbits)) { \ + GST_WARNING ("failed to read uint8, nbits: %d", nbits); \ + goto error; \ + } \ +} + +#define READ_UINT16(nr, val, nbits) { \ + if (!nal_reader_get_bits_uint16 (nr, &val, nbits)) { \ + GST_WARNING ("failed to read uint16, nbits: %d", nbits); \ + goto error; \ + } \ +} + +#define READ_UINT32(nr, val, nbits) { \ + if (!nal_reader_get_bits_uint32 (nr, &val, nbits)) { \ + GST_WARNING ("failed to read uint32, nbits: %d", nbits); \ + goto error; \ + } \ +} + +#define READ_UINT64(nr, val, nbits) { \ + if (!nal_reader_get_bits_uint64 (nr, &val, nbits)) { \ + GST_WARNING ("failed to read uint32, nbits: %d", nbits); \ + goto error; \ + } \ +} + +#define READ_UE(nr, val) { \ + if (!nal_reader_get_ue (nr, &val)) { \ + GST_WARNING ("failed to read UE"); \ + goto error; \ + } \ +} + +#define READ_UE_ALLOWED(nr, val, min, max) { \ + guint32 tmp; \ + READ_UE (nr, tmp); \ + CHECK_ALLOWED (tmp, min, max); \ + val = tmp; \ +} + +#define READ_SE(nr, val) { \ + if (!nal_reader_get_se (nr, &val)) { \ + GST_WARNING ("failed to read SE"); \ + goto error; \ + } \ 
+} + +#define READ_SE_ALLOWED(nr, val, min, max) { \ + gint32 tmp; \ + READ_SE (nr, tmp); \ + CHECK_ALLOWED (tmp, min, max); \ + val = tmp; \ +} + +/*********** end of nal parser ***************/ + +/***** Utils ****/ +#define EXTENDED_SAR 255 + +static GstH264SPS * +gst_h264_parser_get_sps (GstH264NalParser * nalparser, guint8 sps_id) +{ + GstH264SPS *sps; + + sps = &nalparser->sps[sps_id]; + + if (sps->valid) + return sps; + + return NULL; +} + +static GstH264PPS * +gst_h264_parser_get_pps (GstH264NalParser * nalparser, guint8 pps_id) +{ + GstH264PPS *pps; + + pps = &nalparser->pps[pps_id]; + + if (pps->valid) + return pps; + + return NULL; +} + +static inline void +set_nalu_datas (GstH264NalUnit * nalu) +{ + guint8 *data = nalu->data + nalu->offset; + + nalu->type = (data[0] & 0x1f); + nalu->ref_idc = (data[0] & 0x60) >> 5; + nalu->idr_pic_flag = (nalu->type == 5 ? 1 : 0); + + GST_DEBUG ("Nal type %u, ref_idc %u", nalu->type, nalu->ref_idc); +} + +static inline gint +scan_for_start_codes (const guint8 * data, guint size) +{ + GstByteReader br; + gst_byte_reader_init (&br, data, size); + + /* NALU not empty, so we can at least expect 1 (even 2) bytes following sc */ + return gst_byte_reader_masked_scan_uint32 (&br, 0xffffff00, 0x00000100, + 0, size); +} + +static gboolean +gst_h264_parser_more_data (NalReader * nr) +{ + guint remaining; + + remaining = nal_reader_get_remaining (nr); + if (remaining == 0) + return FALSE; + + if (remaining <= 8) { + guint8 rbsp_stop_one_bit; + + if (!nal_reader_peek_bits_uint8 (nr, &rbsp_stop_one_bit, 1)) + return FALSE; + + if (rbsp_stop_one_bit == 1) { + guint8 zero_bits; + + if (remaining == 1) + return FALSE; + + if (!nal_reader_peek_bits_uint8 (nr, &zero_bits, remaining)) + return FALSE; + + if ((zero_bits - (1 << (remaining - 1))) == 0) + return FALSE; + } + } + + return TRUE; +} + +/****** Parsing functions *****/ + +static gboolean +gst_h264_parse_hrd_parameters (GstH264HRDParams * hrd, NalReader * nr) +{ + guint 
sched_sel_idx; + + GST_DEBUG ("parsing \"HRD Parameters\""); + + READ_UE_ALLOWED (nr, hrd->cpb_cnt_minus1, 0, 31); + READ_UINT8 (nr, hrd->bit_rate_scale, 4); + READ_UINT8 (nr, hrd->cpb_size_scale, 4); + + for (sched_sel_idx = 0; sched_sel_idx <= hrd->cpb_cnt_minus1; sched_sel_idx++) { + READ_UE (nr, hrd->bit_rate_value_minus1[sched_sel_idx]); + READ_UE (nr, hrd->cpb_size_value_minus1[sched_sel_idx]); + READ_UINT8 (nr, hrd->cbr_flag[sched_sel_idx], 1); + } + + READ_UINT8 (nr, hrd->initial_cpb_removal_delay_length_minus1, 5); + READ_UINT8 (nr, hrd->cpb_removal_delay_length_minus1, 5); + READ_UINT8 (nr, hrd->dpb_output_delay_length_minus1, 5); + READ_UINT8 (nr, hrd->time_offset_length, 5); + + return TRUE; + +error: + GST_WARNING ("error parsing \"HRD Parameters\""); + return FALSE; +} + +static gboolean +gst_h264_parse_vui_parameters (GstH264SPS * sps, NalReader * nr) +{ + GstH264VUIParams *vui = &sps->vui_parameters; + + GST_DEBUG ("parsing \"VUI Parameters\""); + + /* set default values for fields that might not be present in the bitstream + and have valid defaults */ + vui->aspect_ratio_idc = 0; + vui->video_format = 5; + vui->video_full_range_flag = 0; + vui->colour_primaries = 2; + vui->transfer_characteristics = 2; + vui->matrix_coefficients = 2; + vui->chroma_sample_loc_type_top_field = 0; + vui->chroma_sample_loc_type_bottom_field = 0; + vui->low_delay_hrd_flag = 0; + + READ_UINT8 (nr, vui->aspect_ratio_info_present_flag, 1); + if (vui->aspect_ratio_info_present_flag) { + READ_UINT8 (nr, vui->aspect_ratio_idc, 8); + if (vui->aspect_ratio_idc == EXTENDED_SAR) { + READ_UINT16 (nr, vui->sar_width, 16); + READ_UINT16 (nr, vui->sar_height, 16); + } + } + + READ_UINT8 (nr, vui->overscan_info_present_flag, 1); + if (vui->overscan_info_present_flag) + READ_UINT8 (nr, vui->overscan_appropriate_flag, 1); + + READ_UINT8 (nr, vui->video_signal_type_present_flag, 1); + if (vui->video_signal_type_present_flag) { + + READ_UINT8 (nr, vui->video_format, 3); + READ_UINT8 (nr, 
vui->video_full_range_flag, 1); + READ_UINT8 (nr, vui->colour_description_present_flag, 1); + if (vui->colour_description_present_flag) { + READ_UINT8 (nr, vui->colour_primaries, 8); + READ_UINT8 (nr, vui->transfer_characteristics, 8); + READ_UINT8 (nr, vui->matrix_coefficients, 8); + } + } + + READ_UINT8 (nr, vui->chroma_loc_info_present_flag, 1); + if (vui->chroma_loc_info_present_flag) { + READ_UE_ALLOWED (nr, vui->chroma_sample_loc_type_top_field, 0, 5); + READ_UE_ALLOWED (nr, vui->chroma_sample_loc_type_bottom_field, 0, 5); + } + + READ_UINT8 (nr, vui->timing_info_present_flag, 1); + if (vui->timing_info_present_flag) { + READ_UINT32 (nr, vui->num_units_in_tick, 32); + if (vui->num_units_in_tick == 0) + GST_WARNING ("num_units_in_tick = 0 detected in stream " + "(incompliant to H.264 E.2.1)."); + + READ_UINT32 (nr, vui->time_scale, 32); + if (vui->time_scale == 0) + GST_WARNING ("time_scale = 0 detected in stream " + "(incompliant to H.264 E.2.1)."); + + READ_UINT8 (nr, vui->fixed_frame_rate_flag, 1); + } + + READ_UINT8 (nr, vui->nal_hrd_parameters_present_flag, 1); + if (vui->nal_hrd_parameters_present_flag) { + if (!gst_h264_parse_hrd_parameters (&vui->nal_hrd_parameters, nr)) + goto error; + } + + READ_UINT8 (nr, vui->vcl_hrd_parameters_present_flag, 1); + if (vui->vcl_hrd_parameters_present_flag) { + if (!gst_h264_parse_hrd_parameters (&vui->vcl_hrd_parameters, nr)) + goto error; + } + + if (vui->nal_hrd_parameters_present_flag || + vui->vcl_hrd_parameters_present_flag) + READ_UINT8 (nr, vui->low_delay_hrd_flag, 1); + + READ_UINT8 (nr, vui->pic_struct_present_flag, 1); + READ_UINT8 (nr, vui->bitstream_restriction_flag, 1); + if (vui->bitstream_restriction_flag) { + READ_UINT8 (nr, vui->motion_vectors_over_pic_boundaries_flag, 1); + READ_UE (nr, vui->max_bytes_per_pic_denom); + READ_UE_ALLOWED (nr, vui->max_bits_per_mb_denom, 0, 16); + READ_UE_ALLOWED (nr, vui->log2_max_mv_length_horizontal, 0, 16); + READ_UE_ALLOWED (nr, vui->log2_max_mv_length_vertical, 
0, 16); + READ_UE (nr, vui->num_reorder_frames); + READ_UE (nr, vui->max_dec_frame_buffering); + } + + return TRUE; + +error: + GST_WARNING ("error parsing \"VUI Parameters\""); + return FALSE; +} + +static gboolean +gst_h264_parser_parse_scaling_list (NalReader * nr, + guint8 scaling_lists_4x4[6][16], guint8 scaling_lists_8x8[6][64], + const guint8 fallback_4x4_inter[16], const guint8 fallback_4x4_intra[16], + const guint8 fallback_8x8_inter[64], const guint8 fallback_8x8_intra[64], + guint8 n_lists) +{ + guint i; + + GST_DEBUG ("parsing scaling lists"); + + for (i = 0; i < 12; i++) { + gboolean use_default = FALSE; + + if (i < n_lists) { + guint8 scaling_list_present_flag; + + READ_UINT8 (nr, scaling_list_present_flag, 1); + if (scaling_list_present_flag) { + guint8 *scaling_list; + const guint8 *scan; + guint size; + guint j; + guint8 last_scale, next_scale; + + if (i < 6) { + scaling_list = scaling_lists_4x4[i]; + scan = zigzag_4x4; + size = 16; + } else { + scaling_list = scaling_lists_8x8[i - 6]; + scan = zigzag_8x8; + size = 64; + } + + last_scale = 8; + next_scale = 8; + for (j = 0; j < size; j++) { + if (next_scale != 0) { + gint32 delta_scale; + + READ_SE (nr, delta_scale); + next_scale = (last_scale + delta_scale) & 0xff; + } + if (j == 0 && next_scale == 0) { + use_default = TRUE; + break; + } + last_scale = scaling_list[scan[j]] = + (next_scale == 0) ? 
last_scale : next_scale; + } + } else + use_default = TRUE; + } else + use_default = TRUE; + + if (use_default) { + switch (i) { + case 0: + memcpy (scaling_lists_4x4[0], fallback_4x4_intra, 16); + break; + case 1: + memcpy (scaling_lists_4x4[1], scaling_lists_4x4[0], 16); + break; + case 2: + memcpy (scaling_lists_4x4[2], scaling_lists_4x4[1], 16); + break; + case 3: + memcpy (scaling_lists_4x4[3], fallback_4x4_inter, 16); + break; + case 4: + memcpy (scaling_lists_4x4[4], scaling_lists_4x4[3], 16); + break; + case 5: + memcpy (scaling_lists_4x4[5], scaling_lists_4x4[4], 16); + break; + case 6: + memcpy (scaling_lists_8x8[0], fallback_8x8_intra, 64); + break; + case 7: + memcpy (scaling_lists_8x8[1], fallback_8x8_inter, 64); + break; + case 8: + memcpy (scaling_lists_8x8[2], scaling_lists_8x8[0], 64); + break; + case 9: + memcpy (scaling_lists_8x8[3], scaling_lists_8x8[1], 64); + break; + case 10: + memcpy (scaling_lists_8x8[4], scaling_lists_8x8[2], 64); + break; + case 11: + memcpy (scaling_lists_8x8[5], scaling_lists_8x8[3], 64); + break; + + default: + break; + } + } + } + + return TRUE; + +error: + GST_WARNING ("error parsing scaling lists"); + return FALSE; +} + +static gboolean +slice_parse_ref_pic_list_modification_1 (GstH264SliceHdr * slice, + NalReader * nr, guint list) +{ + GstH264RefPicListModification *entries; + guint8 *ref_pic_list_modification_flag, *n_ref_pic_list_modification; + guint32 modification_of_pic_nums_idc; + guint i = 0; + + if (list == 0) { + entries = slice->ref_pic_list_modification_l0; + ref_pic_list_modification_flag = &slice->ref_pic_list_modification_flag_l0; + n_ref_pic_list_modification = &slice->n_ref_pic_list_modification_l0; + } else { + entries = slice->ref_pic_list_modification_l1; + ref_pic_list_modification_flag = &slice->ref_pic_list_modification_flag_l1; + n_ref_pic_list_modification = &slice->n_ref_pic_list_modification_l1; + } + + READ_UINT8 (nr, *ref_pic_list_modification_flag, 1); + if 
(*ref_pic_list_modification_flag) { + while (1) { + READ_UE (nr, modification_of_pic_nums_idc); + if (modification_of_pic_nums_idc == 3) + break; + if (modification_of_pic_nums_idc == 0 || + modification_of_pic_nums_idc == 1) { + READ_UE_ALLOWED (nr, entries[i].value.abs_diff_pic_num_minus1, 0, + slice->max_pic_num - 1); + } else if (modification_of_pic_nums_idc == 2) { + READ_UE (nr, entries[i].value.long_term_pic_num); + } + entries[i++].modification_of_pic_nums_idc = modification_of_pic_nums_idc; + } + } + *n_ref_pic_list_modification = i; + return TRUE; + +error: + GST_WARNING ("error parsing \"Reference picture list %u modification\"", + list); + return FALSE; +} + +static gboolean +slice_parse_ref_pic_list_modification (GstH264SliceHdr * slice, NalReader * nr) +{ + if (!GST_H264_IS_I_SLICE (slice) && !GST_H264_IS_SI_SLICE (slice)) { + if (!slice_parse_ref_pic_list_modification_1 (slice, nr, 0)) + return FALSE; + } + + if (GST_H264_IS_B_SLICE (slice)) { + if (!slice_parse_ref_pic_list_modification_1 (slice, nr, 1)) + return FALSE; + } + return TRUE; +} + +static gboolean +gst_h264_slice_parse_dec_ref_pic_marking (GstH264SliceHdr * slice, + GstH264NalUnit * nalu, NalReader * nr) +{ + GstH264DecRefPicMarking *dec_ref_pic_m; + + GST_DEBUG ("parsing \"Decoded reference picture marking\""); + + dec_ref_pic_m = &slice->dec_ref_pic_marking; + + if (nalu->idr_pic_flag) { + READ_UINT8 (nr, dec_ref_pic_m->no_output_of_prior_pics_flag, 1); + READ_UINT8 (nr, dec_ref_pic_m->long_term_reference_flag, 1); + } else { + READ_UINT8 (nr, dec_ref_pic_m->adaptive_ref_pic_marking_mode_flag, 1); + if (dec_ref_pic_m->adaptive_ref_pic_marking_mode_flag) { + guint32 mem_mgmt_ctrl_op; + GstH264RefPicMarking *refpicmarking; + + dec_ref_pic_m->n_ref_pic_marking = 0; + while (1) { + refpicmarking = + &dec_ref_pic_m->ref_pic_marking[dec_ref_pic_m->n_ref_pic_marking]; + + READ_UE (nr, mem_mgmt_ctrl_op); + if (mem_mgmt_ctrl_op == 0) + break; + + 
refpicmarking->memory_management_control_operation = mem_mgmt_ctrl_op; + + if (mem_mgmt_ctrl_op == 1 || mem_mgmt_ctrl_op == 3) + READ_UE (nr, refpicmarking->difference_of_pic_nums_minus1); + + if (mem_mgmt_ctrl_op == 2) + READ_UE (nr, refpicmarking->long_term_pic_num); + + if (mem_mgmt_ctrl_op == 3 || mem_mgmt_ctrl_op == 6) + READ_UE (nr, refpicmarking->long_term_frame_idx); + + if (mem_mgmt_ctrl_op == 4) + READ_UE (nr, refpicmarking->max_long_term_frame_idx_plus1); + + dec_ref_pic_m->n_ref_pic_marking++; + } + } + } + + return TRUE; + +error: + GST_WARNING ("error parsing \"Decoded reference picture marking\""); + return FALSE; +} + +static gboolean +gst_h264_slice_parse_pred_weight_table (GstH264SliceHdr * slice, + NalReader * nr, guint8 chroma_array_type) +{ + GstH264PredWeightTable *p; + gint16 default_luma_weight, default_chroma_weight; + gint i; + + GST_DEBUG ("parsing \"Prediction weight table\""); + + p = &slice->pred_weight_table; + + READ_UE_ALLOWED (nr, p->luma_log2_weight_denom, 0, 7); + /* set default values */ + default_luma_weight = 1 << p->luma_log2_weight_denom; + for (i = 0; i < G_N_ELEMENTS (p->luma_weight_l0); i++) + p->luma_weight_l0[i] = default_luma_weight; + memset (p->luma_offset_l0, 0, sizeof (p->luma_offset_l0)); + if (GST_H264_IS_B_SLICE (slice)) { + for (i = 0; i < G_N_ELEMENTS (p->luma_weight_l1); i++) + p->luma_weight_l1[i] = default_luma_weight; + memset (p->luma_offset_l1, 0, sizeof (p->luma_offset_l1)); + } + + if (chroma_array_type != 0) { + READ_UE_ALLOWED (nr, p->chroma_log2_weight_denom, 0, 7); + /* set default values */ + default_chroma_weight = 1 << p->chroma_log2_weight_denom; + for (i = 0; i < G_N_ELEMENTS (p->chroma_weight_l0); i++) { + p->chroma_weight_l0[i][0] = default_chroma_weight; + p->chroma_weight_l0[i][1] = default_chroma_weight; + } + memset (p->chroma_offset_l0, 0, sizeof (p->chroma_offset_l0)); + if (GST_H264_IS_B_SLICE (slice)) { + for (i = 0; i < G_N_ELEMENTS (p->chroma_weight_l1); i++) { + 
p->chroma_weight_l1[i][0] = default_chroma_weight; + p->chroma_weight_l1[i][1] = default_chroma_weight; + } + memset (p->chroma_offset_l1, 0, sizeof (p->chroma_offset_l1)); + } + } + + for (i = 0; i <= slice->num_ref_idx_l0_active_minus1; i++) { + guint8 luma_weight_l0_flag; + + READ_UINT8 (nr, luma_weight_l0_flag, 1); + if (luma_weight_l0_flag) { + READ_SE_ALLOWED (nr, p->luma_weight_l0[i], -128, 127); + READ_SE_ALLOWED (nr, p->luma_offset_l0[i], -128, 127); + } + if (chroma_array_type != 0) { + guint8 chroma_weight_l0_flag; + gint j; + + READ_UINT8 (nr, chroma_weight_l0_flag, 1); + if (chroma_weight_l0_flag) { + for (j = 0; j < 2; j++) { + READ_SE_ALLOWED (nr, p->chroma_weight_l0[i][j], -128, 127); + READ_SE_ALLOWED (nr, p->chroma_offset_l0[i][j], -128, 127); + } + } + } + } + + if (GST_H264_IS_B_SLICE (slice)) { + for (i = 0; i <= slice->num_ref_idx_l1_active_minus1; i++) { + guint8 luma_weight_l1_flag; + + READ_UINT8 (nr, luma_weight_l1_flag, 1); + if (luma_weight_l1_flag) { + READ_SE_ALLOWED (nr, p->luma_weight_l1[i], -128, 127); + READ_SE_ALLOWED (nr, p->luma_offset_l1[i], -128, 127); + } + if (chroma_array_type != 0) { + guint8 chroma_weight_l1_flag; + gint j; + + READ_UINT8 (nr, chroma_weight_l1_flag, 1); + if (chroma_weight_l1_flag) { + for (j = 0; j < 2; j++) { + READ_SE_ALLOWED (nr, p->chroma_weight_l1[i][j], -128, 127); + READ_SE_ALLOWED (nr, p->chroma_offset_l1[i][j], -128, 127); + } + } + } + } + } + + return TRUE; + +error: + GST_WARNING ("error parsing \"Prediction weight table\""); + return FALSE; +} + +static gboolean +gst_h264_parser_parse_buffering_period (GstH264NalParser * nalparser, + GstH264BufferingPeriod * per, NalReader * nr) +{ + GstH264SPS *sps; + guint8 sps_id; + + GST_DEBUG ("parsing \"Buffering period\""); + + READ_UE_ALLOWED (nr, sps_id, 0, GST_H264_MAX_SPS_COUNT - 1); + sps = gst_h264_parser_get_sps (nalparser, sps_id); + if (!sps) { + GST_WARNING ("couldn't find associated sequence parameter set with id: %d", + sps_id); + return 
GST_H264_PARSER_BROKEN_LINK; + } + per->sps = sps; + + if (sps->vui_parameters_present_flag) { + GstH264VUIParams *vui = &sps->vui_parameters; + + if (vui->nal_hrd_parameters_present_flag) { + GstH264HRDParams *hrd = &vui->nal_hrd_parameters; + guint8 sched_sel_idx; + + for (sched_sel_idx = 0; sched_sel_idx <= hrd->cpb_cnt_minus1; + sched_sel_idx++) { + READ_UINT8 (nr, per->nal_initial_cpb_removal_delay[sched_sel_idx], 5); + READ_UINT8 (nr, + per->nal_initial_cpb_removal_delay_offset[sched_sel_idx], 5); + } + } + + if (vui->vcl_hrd_parameters_present_flag) { + GstH264HRDParams *hrd = &vui->vcl_hrd_parameters; + guint8 sched_sel_idx; + + for (sched_sel_idx = 0; sched_sel_idx <= hrd->cpb_cnt_minus1; + sched_sel_idx++) { + READ_UINT8 (nr, per->vcl_initial_cpb_removal_delay[sched_sel_idx], 5); + READ_UINT8 (nr, + per->vcl_initial_cpb_removal_delay_offset[sched_sel_idx], 5); + } + } + } + + return GST_H264_PARSER_OK; + +error: + GST_WARNING ("error parsing \"Buffering period\""); + return GST_H264_PARSER_ERROR; +} + +static gboolean +gst_h264_parse_clock_timestamp (GstH264ClockTimestamp * tim, + GstH264VUIParams * vui, NalReader * nr) +{ + guint8 full_timestamp_flag; + guint8 time_offset_length; + + GST_DEBUG ("parsing \"Clock timestamp\""); + + /* defalt values */ + tim->time_offset = 0; + + READ_UINT8 (nr, tim->ct_type, 2); + READ_UINT8 (nr, tim->nuit_field_based_flag, 1); + READ_UINT8 (nr, tim->counting_type, 5); + READ_UINT8 (nr, full_timestamp_flag, 1); + READ_UINT8 (nr, tim->discontinuity_flag, 1); + READ_UINT8 (nr, tim->cnt_dropped_flag, 1); + READ_UINT8 (nr, tim->n_frames, 8); + + if (full_timestamp_flag) { + tim->seconds_flag = TRUE; + READ_UINT8 (nr, tim->seconds_value, 6); + + tim->minutes_flag = TRUE; + READ_UINT8 (nr, tim->minutes_value, 6); + + tim->hours_flag = TRUE; + READ_UINT8 (nr, tim->hours_value, 5); + } else { + READ_UINT8 (nr, tim->seconds_flag, 1); + if (tim->seconds_flag) { + READ_UINT8 (nr, tim->seconds_value, 6); + READ_UINT8 (nr, 
tim->minutes_flag, 1); + if (tim->minutes_flag) { + READ_UINT8 (nr, tim->minutes_value, 6); + READ_UINT8 (nr, tim->hours_flag, 1); + if (tim->hours_flag) + READ_UINT8 (nr, tim->hours_value, 5); + } + } + } + + time_offset_length = 0; + if (vui->nal_hrd_parameters_present_flag) + time_offset_length = vui->nal_hrd_parameters.time_offset_length; + else if (vui->vcl_hrd_parameters_present_flag) + time_offset_length = vui->vcl_hrd_parameters.time_offset_length; + + if (time_offset_length > 0) + READ_UINT32 (nr, tim->time_offset, time_offset_length); + + return TRUE; + +error: + GST_WARNING ("error parsing \"Clock timestamp\""); + return FALSE; +} + +static gboolean +gst_h264_parser_parse_pic_timing (GstH264NalParser * nalparser, + GstH264PicTiming * tim, NalReader * nr) +{ + GST_DEBUG ("parsing \"Picture timing\""); + if (!nalparser->last_sps || !nalparser->last_sps->valid) { + GST_WARNING ("didn't get the associated sequence paramater set for the " + "current access unit"); + goto error; + } + + /* default values */ + memset (tim->clock_timestamp_flag, 0, 3); + + if (nalparser->last_sps->vui_parameters_present_flag) { + GstH264VUIParams *vui = &nalparser->last_sps->vui_parameters; + + if (vui->nal_hrd_parameters_present_flag) { + READ_UINT32 (nr, tim->cpb_removal_delay, + vui->nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1); + READ_UINT32 (nr, tim->dpb_output_delay, + vui->nal_hrd_parameters.dpb_output_delay_length_minus1 + 1); + } else if (vui->nal_hrd_parameters_present_flag) { + READ_UINT32 (nr, tim->cpb_removal_delay, + vui->vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1); + READ_UINT32 (nr, tim->dpb_output_delay, + vui->vcl_hrd_parameters.dpb_output_delay_length_minus1 + 1); + } + + if (vui->pic_struct_present_flag) { + const guint8 num_clock_ts_table[9] = { + 1, 1, 1, 2, 2, 3, 3, 2, 3 + }; + guint8 num_clock_num_ts; + guint i; + + tim->pic_struct_present_flag = TRUE; + READ_UINT8 (nr, tim->pic_struct, 4); + CHECK_ALLOWED ((gint8) tim->pic_struct, 
0, 8); + + num_clock_num_ts = num_clock_ts_table[tim->pic_struct]; + for (i = 0; i < num_clock_num_ts; i++) { + READ_UINT8 (nr, tim->clock_timestamp_flag[i], 1); + if (tim->clock_timestamp_flag[i]) { + if (!gst_h264_parse_clock_timestamp (&tim->clock_timestamp[i], vui, + nr)) + goto error; + } + } + } + } + + return GST_H264_PARSER_OK; + +error: + GST_WARNING ("error parsing \"Picture timing\""); + return GST_H264_PARSER_ERROR; +} + +/******** API *************/ + +/** + * gst_h264_nal_parser_new: + * + * Creates a new #GstH264NalParser. It should be freed with + * gst_h264_nal_parser_free after use. + * + * Returns: a new #GstH264NalParser + */ +GstH264NalParser * +gst_h264_nal_parser_new (void) +{ + GstH264NalParser *nalparser; + + nalparser = g_slice_new0 (GstH264NalParser); + GST_DEBUG_CATEGORY_INIT (h264_parser_debug, "codecparsers_h264", 0, + "h264 parser library"); + + return nalparser; +} + +/** + * gst_h264_nal_parser_free: + * @nalparser: the #GstH264NalParser to free + * + * Frees @nalparser and sets it to %NULL + */ +void +gst_h264_nal_parser_free (GstH264NalParser * nalparser) +{ + g_slice_free (GstH264NalParser, nalparser); + + nalparser = NULL; +} + +/** + * gst_h264_parser_identify_nalu_unchecked: + * @nalparser: a #GstH264NalParser + * @data: The data to parse + * @offset: the offset from which to parse @data + * @size: the size of @data + * @nalu: The #GstH264NalUnit where to store parsed nal headers + * + * Parses @data and fills @nalu from the next nalu data from @data. + * + * This differs from @gst_h264_parser_identify_nalu in that it doesn't + * check whether the packet is complete or not. + * + * Note: Only use this function if you already know the provided @data + * is a complete NALU, else use @gst_h264_parser_identify_nalu. 
+ * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parser_identify_nalu_unchecked (GstH264NalParser * nalparser, + const guint8 * data, guint offset, gsize size, GstH264NalUnit * nalu) +{ + gint off1; + + if (size < offset + 4) { + GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT + ", offset %u", size, offset); + return GST_H264_PARSER_ERROR; + } + + off1 = scan_for_start_codes (data + offset, size - offset); + + if (off1 < 0) { + GST_DEBUG ("No start code prefix in this buffer"); + return GST_H264_PARSER_NO_NAL; + } + + if (offset + off1 == size - 1) { + GST_DEBUG ("Missing data to identify nal unit"); + + return GST_H264_PARSER_ERROR; + } + + nalu->valid = TRUE; + nalu->sc_offset = offset + off1; + + /* sc might have 2 or 3 0-bytes */ + if (nalu->sc_offset > 0 && data[nalu->sc_offset - 1] == 00) + nalu->sc_offset--; + + nalu->offset = offset + off1 + 3; + nalu->data = (guint8 *) data; + + set_nalu_datas (nalu); + + if (nalu->type == GST_H264_NAL_SEQ_END || + nalu->type == GST_H264_NAL_STREAM_END) { + GST_DEBUG ("end-of-seq or end-of-stream nal found"); + nalu->size = 0; + return GST_H264_PARSER_OK; + } + + nalu->size = size - nalu->offset; + + return GST_H264_PARSER_OK; +} + +/** + * gst_h264_parser_identify_nalu: + * @nalparser: a #GstH264NalParser + * @data: The data to parse + * @offset: the offset from which to parse @data + * @size: the size of @data + * @nalu: The #GstH264NalUnit where to store parsed nal headers + * + * Parses @data and fills @nalu from the next nalu data from @data + * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parser_identify_nalu (GstH264NalParser * nalparser, + const guint8 * data, guint offset, gsize size, GstH264NalUnit * nalu) +{ + GstH264ParserResult res; + gint off2; + + res = + gst_h264_parser_identify_nalu_unchecked (nalparser, data, offset, size, + nalu); + + if (res != GST_H264_PARSER_OK || nalu->size == 0) + goto beach; + + off2 = scan_for_start_codes 
(data + nalu->offset, size - nalu->offset); + if (off2 < 0) { + GST_DEBUG ("Nal start %d, No end found", nalu->offset); + + return GST_H264_PARSER_NO_NAL_END; + } + + if (off2 > 0 && data[nalu->offset + off2 - 1] == 00) + off2--; + + nalu->size = off2; + if (nalu->size < 2) + return GST_H264_PARSER_BROKEN_DATA; + + GST_DEBUG ("Complete nal found. Off: %d, Size: %d", nalu->offset, nalu->size); + +beach: + return res; +} + + +/** + * gst_h264_parser_identify_nalu_avc: + * @nalparser: a #GstH264NalParser + * @data: The data to parse, must be the beging of the Nal unit + * @offset: the offset from which to parse @data + * @size: the size of @data + * @nal_length_size: the size in bytes of the AVC nal length prefix. + * @nalu: The #GstH264NalUnit where to store parsed nal headers + * + * Parses @data and sets @nalu. + * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parser_identify_nalu_avc (GstH264NalParser * nalparser, + const guint8 * data, guint offset, gsize size, guint8 nal_length_size, + GstH264NalUnit * nalu) +{ + GstBitReader br; + + if (size < offset + nal_length_size) { + GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT + ", offset %u", size, offset); + return GST_H264_PARSER_ERROR; + } + + size = size - offset; + gst_bit_reader_init (&br, data + offset, size); + + gst_bit_reader_get_bits_uint32 (&br, &nalu->size, nal_length_size * 8); + nalu->sc_offset = offset; + nalu->offset = offset + nal_length_size; + + if (size < nalu->size + nal_length_size) { + nalu->size = 0; + + return GST_H264_PARSER_NO_NAL_END; + } + + nalu->data = (guint8 *) data; + + set_nalu_datas (nalu); + + if (nalu->size < 2) + return GST_H264_PARSER_BROKEN_DATA; + + nalu->valid = TRUE; + + return GST_H264_PARSER_OK; +} + +/** + * gst_h264_parser_parse_nal: + * @nalparser: a #GstH264NalParser + * @nalu: The #GstH264NalUnit to parse + * + * This function should be called in the case one doesn't need to + * parse a specific structure. 
It is necessary to do so to make + * sure @nalparser is up to date. + * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parser_parse_nal (GstH264NalParser * nalparser, GstH264NalUnit * nalu) +{ + GstH264SPS sps; + GstH264PPS pps; + + switch (nalu->type) { + case GST_H264_NAL_SPS: + return gst_h264_parser_parse_sps (nalparser, nalu, &sps, FALSE); + break; + case GST_H264_NAL_PPS: + return gst_h264_parser_parse_pps (nalparser, nalu, &pps); + } + + return GST_H264_PARSER_OK; +} + +/** + * gst_h264_parser_parse_sps: + * @nalparser: a #GstH264NalParser + * @nalu: The #GST_H264_NAL_SPS #GstH264NalUnit to parse + * @sps: The #GstH264SPS to fill. + * @parse_vui_params: Whether to parse the vui_params or not + * + * Parses @data, and fills the @sps structure. + * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parser_parse_sps (GstH264NalParser * nalparser, GstH264NalUnit * nalu, + GstH264SPS * sps, gboolean parse_vui_params) +{ + GstH264ParserResult res = gst_h264_parse_sps (nalu, sps, parse_vui_params); + + if (res == GST_H264_PARSER_OK) { + GST_DEBUG ("adding sequence parameter set with id: %d to array", sps->id); + + nalparser->sps[sps->id] = *sps; + nalparser->last_sps = &nalparser->sps[sps->id]; + } + + + + return res; +} + +/** + * gst_h264_parse_sps: + * @nalu: The #GST_H264_NAL_SPS #GstH264NalUnit to parse + * @sps: The #GstH264SPS to fill. + * @parse_vui_params: Whether to parse the vui_params or not + * + * Parses @data, and fills the @sps structure. 
+ * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parse_sps (GstH264NalUnit * nalu, GstH264SPS * sps, + gboolean parse_vui_params) +{ + NalReader nr; + gint width, height; + guint8 frame_cropping_flag; + guint subwc[] = { 1, 2, 2, 1 }; + guint subhc[] = { 1, 2, 1, 1 }; + GstH264VUIParams *vui = NULL; + + GST_DEBUG ("parsing SPS"); + nal_reader_init (&nr, nalu->data + nalu->offset + 1, nalu->size - 1); + + /* set default values for fields that might not be present in the bitstream + and have valid defaults */ + sps->chroma_format_idc = 1; + sps->separate_colour_plane_flag = 0; + sps->bit_depth_luma_minus8 = 0; + sps->bit_depth_chroma_minus8 = 0; + memset (sps->scaling_lists_4x4, 16, 96); + memset (sps->scaling_lists_8x8, 16, 384); + sps->mb_adaptive_frame_field_flag = 0; + sps->frame_crop_left_offset = 0; + sps->frame_crop_right_offset = 0; + sps->frame_crop_top_offset = 0; + sps->frame_crop_bottom_offset = 0; + sps->delta_pic_order_always_zero_flag = 0; + + READ_UINT8 (&nr, sps->profile_idc, 8); + READ_UINT8 (&nr, sps->constraint_set0_flag, 1); + READ_UINT8 (&nr, sps->constraint_set1_flag, 1); + READ_UINT8 (&nr, sps->constraint_set2_flag, 1); + READ_UINT8 (&nr, sps->constraint_set3_flag, 1); + + /* skip reserved_zero_4bits */ + if (!nal_reader_skip (&nr, 4)) + goto error; + + READ_UINT8 (&nr, sps->level_idc, 8); + + READ_UE_ALLOWED (&nr, sps->id, 0, GST_H264_MAX_SPS_COUNT - 1); + + if (sps->profile_idc == 100 || sps->profile_idc == 110 || + sps->profile_idc == 122 || sps->profile_idc == 244 || + sps->profile_idc == 44 || sps->profile_idc == 83 || + sps->profile_idc == 86) { + READ_UE_ALLOWED (&nr, sps->chroma_format_idc, 0, 3); + if (sps->chroma_format_idc == 3) + READ_UINT8 (&nr, sps->separate_colour_plane_flag, 1); + + READ_UE_ALLOWED (&nr, sps->bit_depth_luma_minus8, 0, 6); + READ_UE_ALLOWED (&nr, sps->bit_depth_chroma_minus8, 0, 6); + READ_UINT8 (&nr, sps->qpprime_y_zero_transform_bypass_flag, 1); + + READ_UINT8 (&nr, 
sps->scaling_matrix_present_flag, 1); + if (sps->scaling_matrix_present_flag) { + guint8 n_lists; + + n_lists = (sps->chroma_format_idc != 3) ? 8 : 12; + if (!gst_h264_parser_parse_scaling_list (&nr, + sps->scaling_lists_4x4, sps->scaling_lists_8x8, + default_4x4_inter, default_4x4_intra, + default_8x8_inter, default_8x8_intra, n_lists)) + goto error; + } + } + + READ_UE_ALLOWED (&nr, sps->log2_max_frame_num_minus4, 0, 12); + + sps->max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4); + + READ_UE_ALLOWED (&nr, sps->pic_order_cnt_type, 0, 2); + if (sps->pic_order_cnt_type == 0) { + READ_UE_ALLOWED (&nr, sps->log2_max_pic_order_cnt_lsb_minus4, 0, 12); + } else if (sps->pic_order_cnt_type == 1) { + guint i; + + READ_UINT8 (&nr, sps->delta_pic_order_always_zero_flag, 1); + READ_SE (&nr, sps->offset_for_non_ref_pic); + READ_SE (&nr, sps->offset_for_top_to_bottom_field); + READ_UE_ALLOWED (&nr, sps->num_ref_frames_in_pic_order_cnt_cycle, 0, 255); + + for (i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++) + READ_SE (&nr, sps->offset_for_ref_frame[i]); + } + + READ_UE (&nr, sps->num_ref_frames); + READ_UINT8 (&nr, sps->gaps_in_frame_num_value_allowed_flag, 1); + READ_UE (&nr, sps->pic_width_in_mbs_minus1); + READ_UE (&nr, sps->pic_height_in_map_units_minus1); + READ_UINT8 (&nr, sps->frame_mbs_only_flag, 1); + + if (!sps->frame_mbs_only_flag) + READ_UINT8 (&nr, sps->mb_adaptive_frame_field_flag, 1); + + READ_UINT8 (&nr, sps->direct_8x8_inference_flag, 1); + READ_UINT8 (&nr, frame_cropping_flag, 1); + if (frame_cropping_flag) { + READ_UE (&nr, sps->frame_crop_left_offset); + READ_UE (&nr, sps->frame_crop_right_offset); + READ_UE (&nr, sps->frame_crop_top_offset); + READ_UE (&nr, sps->frame_crop_bottom_offset); + } + + READ_UINT8 (&nr, sps->vui_parameters_present_flag, 1); + if (sps->vui_parameters_present_flag && parse_vui_params) { + if (!gst_h264_parse_vui_parameters (sps, &nr)) + goto error; + vui = &sps->vui_parameters; + } + + /* calculate 
ChromaArrayType */ + if (sps->separate_colour_plane_flag) + sps->chroma_array_type = 0; + else + sps->chroma_array_type = sps->chroma_format_idc; + + /* Calculate width and height */ + width = (sps->pic_width_in_mbs_minus1 + 1); + width *= 16; + height = (sps->pic_height_in_map_units_minus1 + 1); + height *= 16 * (2 - sps->frame_mbs_only_flag); + GST_LOG ("initial width=%d, height=%d", width, height); + + width -= (sps->frame_crop_left_offset + sps->frame_crop_right_offset) + * subwc[sps->chroma_format_idc]; + height -= (sps->frame_crop_top_offset + sps->frame_crop_bottom_offset + * subhc[sps->chroma_format_idc] * (2 - sps->frame_mbs_only_flag)); + if (width < 0 || height < 0) { + GST_WARNING ("invalid width/height in SPS"); + return FALSE; + } + GST_LOG ("final width=%u, height=%u", width, height); + sps->width = width; + sps->height = height; + + sps->fps_num = 0; + sps->fps_den = 1; + + if (vui && vui->timing_info_present_flag) { + /* derive framerate */ + /* FIXME verify / also handle other cases */ + GST_LOG ("Framerate: %u %u %u %u", parse_vui_params, + vui->fixed_frame_rate_flag, sps->frame_mbs_only_flag, + vui->pic_struct_present_flag); + + if (parse_vui_params && vui->fixed_frame_rate_flag && + sps->frame_mbs_only_flag && !vui->pic_struct_present_flag) { + sps->fps_num = vui->time_scale; + sps->fps_den = vui->num_units_in_tick; + /* picture is a frame = 2 fields */ + sps->fps_den *= 2; + GST_LOG ("framerate %d/%d", sps->fps_num, sps->fps_den); + } + } else { + GST_LOG ("No VUI, unknown framerate"); + } + + sps->valid = TRUE; + + return GST_H264_PARSER_OK; + +error: + GST_WARNING ("error parsing \"Sequence parameter set\""); + + return GST_H264_PARSER_ERROR; +} + +/** + * gst_h264_parse_pps: + * @nalparser: a #GstH264NalParser + * @nalu: The #GST_H264_NAL_PPS #GstH264NalUnit to parse + * @pps: The #GstH264PPS to fill. + * + * Parses @data, and fills the @pps structure. 
+ * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parse_pps (GstH264NalParser * nalparser, GstH264NalUnit * nalu, + GstH264PPS * pps) +{ + NalReader nr; + GstH264SPS *sps; + gint sps_id; + guint8 pic_scaling_matrix_present_flag; + gint qp_bd_offset; + + GST_DEBUG ("parsing PPS"); + + nal_reader_init (&nr, nalu->data + nalu->offset + 1, nalu->size - 1); + + READ_UE_ALLOWED (&nr, pps->id, 0, GST_H264_MAX_PPS_COUNT - 1); + READ_UE_ALLOWED (&nr, sps_id, 0, GST_H264_MAX_SPS_COUNT - 1); + + sps = gst_h264_parser_get_sps (nalparser, sps_id); + if (!sps) { + GST_WARNING ("couldn't find associated sequence parameter set with id: %d", + sps_id); + return GST_H264_PARSER_BROKEN_LINK; + } + pps->sequence = sps; + qp_bd_offset = 6 * (sps->bit_depth_luma_minus8 + + sps->separate_colour_plane_flag); + + /* set default values for fields that might not be present in the bitstream + and have valid defaults */ + pps->slice_group_id = NULL; + pps->transform_8x8_mode_flag = 0; + memcpy (&pps->scaling_lists_4x4, &sps->scaling_lists_4x4, 96); + memcpy (&pps->scaling_lists_8x8, &sps->scaling_lists_8x8, 384); + + READ_UINT8 (&nr, pps->entropy_coding_mode_flag, 1); + READ_UINT8 (&nr, pps->pic_order_present_flag, 1); + READ_UE_ALLOWED (&nr, pps->num_slice_groups_minus1, 0, 7); + if (pps->num_slice_groups_minus1 > 0) { + READ_UE_ALLOWED (&nr, pps->slice_group_map_type, 0, 6); + + if (pps->slice_group_map_type == 0) { + gint i; + + for (i = 0; i <= pps->num_slice_groups_minus1; i++) + READ_UE (&nr, pps->run_length_minus1[i]); + } else if (pps->slice_group_map_type == 2) { + gint i; + + for (i = 0; i <= pps->num_slice_groups_minus1; i++) { + READ_UE (&nr, pps->top_left[i]); + READ_UE (&nr, pps->bottom_right[i]); + } + } else if (pps->slice_group_map_type >= 3 && pps->slice_group_map_type <= 5) { + READ_UINT8 (&nr, pps->slice_group_change_direction_flag, 1); + READ_UE (&nr, pps->slice_group_change_rate_minus1); + } else if (pps->slice_group_map_type == 6) { + gint bits; 
+ gint i; + + READ_UE (&nr, pps->pic_size_in_map_units_minus1); + bits = g_bit_storage (pps->num_slice_groups_minus1); + + pps->slice_group_id = + g_new (guint8, pps->pic_size_in_map_units_minus1 + 1); + for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) + READ_UINT8 (&nr, pps->slice_group_id[i], bits); + } + } + + READ_UE_ALLOWED (&nr, pps->num_ref_idx_l0_active_minus1, 0, 31); + READ_UE_ALLOWED (&nr, pps->num_ref_idx_l1_active_minus1, 0, 31); + READ_UINT8 (&nr, pps->weighted_pred_flag, 1); + READ_UINT8 (&nr, pps->weighted_bipred_idc, 2); + READ_SE_ALLOWED (&nr, pps->pic_init_qp_minus26, -(26 + qp_bd_offset), 25); + READ_SE_ALLOWED (&nr, pps->pic_init_qs_minus26, -26, 25); + READ_SE_ALLOWED (&nr, pps->chroma_qp_index_offset, -12, 12); + pps->second_chroma_qp_index_offset = pps->chroma_qp_index_offset; + READ_UINT8 (&nr, pps->deblocking_filter_control_present_flag, 1); + READ_UINT8 (&nr, pps->constrained_intra_pred_flag, 1); + READ_UINT8 (&nr, pps->redundant_pic_cnt_present_flag, 1); + + if (!gst_h264_parser_more_data (&nr)) + goto done; + + READ_UINT8 (&nr, pps->transform_8x8_mode_flag, 1); + + READ_UINT8 (&nr, pic_scaling_matrix_present_flag, 1); + if (pic_scaling_matrix_present_flag) { + guint8 n_lists; + + n_lists = 6 + ((sps->chroma_format_idc != 3) ? 
2 : 6) * + pps->transform_8x8_mode_flag; + + if (sps->scaling_matrix_present_flag) { + if (!gst_h264_parser_parse_scaling_list (&nr, + pps->scaling_lists_4x4, pps->scaling_lists_8x8, + sps->scaling_lists_4x4[0], sps->scaling_lists_4x4[3], + sps->scaling_lists_8x8[0], sps->scaling_lists_8x8[3], n_lists)) + goto error; + } else { + if (!gst_h264_parser_parse_scaling_list (&nr, + pps->scaling_lists_4x4, pps->scaling_lists_8x8, + default_4x4_inter, default_4x4_intra, + default_8x8_inter, default_8x8_intra, n_lists)) + goto error; + } + } + + READ_SE_ALLOWED (&nr, pps->second_chroma_qp_index_offset, -12, 12); + +done: + pps->valid = TRUE; + return GST_H264_PARSER_OK; + +error: + GST_WARNING ("error parsing \"Picture parameter set\""); + return GST_H264_PARSER_ERROR; +} + +/** + * gst_h264_parser_parse_pps: + * @nalparser: a #GstH264NalParser + * @nalu: The #GST_H264_NAL_PPS #GstH264NalUnit to parse + * @pps: The #GstH264PPS to fill. + * + * Parses @data, and fills the @pps structure. + * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parser_parse_pps (GstH264NalParser * nalparser, + GstH264NalUnit * nalu, GstH264PPS * pps) +{ + GstH264ParserResult res = gst_h264_parse_pps (nalparser, nalu, pps); + + if (res == GST_H264_PARSER_OK) { + GST_DEBUG ("adding picture parameter set with id: %d to array", pps->id); + + nalparser->pps[pps->id] = *pps; + nalparser->last_pps = &nalparser->pps[pps->id]; + } + + return res; +} + +/** + * gst_h264_parser_parse_slice_hdr: + * @nalparser: a #GstH264NalParser + * @nalu: The #GST_H264_NAL_SLICE #GstH264NalUnit to parse + * @slice: The #GstH264SliceHdr to fill. + * @parse_pred_weight_table: Whether to parse the pred_weight_table or not + * @parse_dec_ref_pic_marking: Whether to parse the dec_ref_pic_marking or not + * + * Parses @data, and fills the @slice structure. 
+ * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parser_parse_slice_hdr (GstH264NalParser * nalparser, + GstH264NalUnit * nalu, GstH264SliceHdr * slice, + gboolean parse_pred_weight_table, gboolean parse_dec_ref_pic_marking) +{ + NalReader nr; + gint pps_id; + GstH264PPS *pps; + GstH264SPS *sps; + + if (!nalu->size) { + GST_DEBUG ("Invalid Nal Unit"); + return GST_H264_PARSER_ERROR; + } + + + nal_reader_init (&nr, nalu->data + nalu->offset + 1, nalu->size - 1); + + READ_UE (&nr, slice->first_mb_in_slice); + READ_UE (&nr, slice->type); + + GST_DEBUG ("parsing \"Slice header\", slice type %u", slice->type); + + READ_UE_ALLOWED (&nr, pps_id, 0, GST_H264_MAX_PPS_COUNT); + pps = gst_h264_parser_get_pps (nalparser, pps_id); + + if (!pps) { + GST_WARNING ("couldn't find associated picture parameter set with id: %d", + pps_id); + + return GST_H264_PARSER_BROKEN_LINK; + } + + slice->pps = pps; + sps = pps->sequence; + if (!sps) { + GST_WARNING ("couldn't find associated sequence parameter set with id: %d", + pps->id); + return GST_H264_PARSER_BROKEN_LINK; + } + + /* set default values for fields that might not be present in the bitstream + and have valid defaults */ + slice->field_pic_flag = 0; + slice->bottom_field_flag = 0; + slice->delta_pic_order_cnt_bottom = 0; + slice->delta_pic_order_cnt[0] = 0; + slice->delta_pic_order_cnt[1] = 0; + slice->redundant_pic_cnt = 0; + slice->num_ref_idx_l0_active_minus1 = pps->num_ref_idx_l0_active_minus1; + slice->num_ref_idx_l1_active_minus1 = pps->num_ref_idx_l1_active_minus1; + slice->disable_deblocking_filter_idc = 0; + slice->slice_alpha_c0_offset_div2 = 0; + + if (sps->separate_colour_plane_flag) + READ_UINT8 (&nr, slice->colour_plane_id, 2); + + READ_UINT16 (&nr, slice->frame_num, sps->log2_max_frame_num_minus4 + 4); + + if (!sps->frame_mbs_only_flag) { + READ_UINT8 (&nr, slice->field_pic_flag, 1); + if (slice->field_pic_flag) + READ_UINT8 (&nr, slice->bottom_field_flag, 1); + } + + /* calculate 
MaxPicNum */ + if (slice->field_pic_flag) + slice->max_pic_num = sps->max_frame_num; + else + slice->max_pic_num = 2 * sps->max_frame_num; + + if (nalu->type == 5) + READ_UE_ALLOWED (&nr, slice->idr_pic_id, 0, G_MAXUINT16); + + if (sps->pic_order_cnt_type == 0) { + READ_UINT16 (&nr, slice->pic_order_cnt_lsb, + sps->log2_max_pic_order_cnt_lsb_minus4 + 4); + + if (pps->pic_order_present_flag && !slice->field_pic_flag) + READ_SE (&nr, slice->delta_pic_order_cnt_bottom); + } + + if (sps->pic_order_cnt_type == 1 && !sps->delta_pic_order_always_zero_flag) { + READ_SE (&nr, slice->delta_pic_order_cnt[0]); + if (pps->pic_order_present_flag && !slice->field_pic_flag) + READ_SE (&nr, slice->delta_pic_order_cnt[1]); + } + + if (pps->redundant_pic_cnt_present_flag) + READ_UE_ALLOWED (&nr, slice->redundant_pic_cnt, 0, G_MAXINT8); + + if (GST_H264_IS_B_SLICE (slice)) + READ_UINT8 (&nr, slice->direct_spatial_mv_pred_flag, 1); + + if (GST_H264_IS_P_SLICE (slice) || GST_H264_IS_SP_SLICE (slice) || + GST_H264_IS_B_SLICE (slice)) { + guint8 num_ref_idx_active_override_flag; + + READ_UINT8 (&nr, num_ref_idx_active_override_flag, 1); + if (num_ref_idx_active_override_flag) { + READ_UE_ALLOWED (&nr, slice->num_ref_idx_l0_active_minus1, 0, 31); + + if (GST_H264_IS_B_SLICE (slice)) + READ_UE_ALLOWED (&nr, slice->num_ref_idx_l1_active_minus1, 0, 31); + } + } + + if (!slice_parse_ref_pic_list_modification (slice, &nr)) + goto error; + + if ((pps->weighted_pred_flag && (GST_H264_IS_P_SLICE (slice) + || GST_H264_IS_SP_SLICE (slice))) + || (pps->weighted_bipred_idc == 1 && GST_H264_IS_B_SLICE (slice))) { + if (!gst_h264_slice_parse_pred_weight_table (slice, &nr, + sps->chroma_array_type)) + goto error; + } + + if (nalu->ref_idc != 0) { + if (!gst_h264_slice_parse_dec_ref_pic_marking (slice, nalu, &nr)) + goto error; + } + + if (pps->entropy_coding_mode_flag && !GST_H264_IS_I_SLICE (slice) && + !GST_H264_IS_SI_SLICE (slice)) + READ_UE_ALLOWED (&nr, slice->cabac_init_idc, 0, 2); + + 
READ_SE_ALLOWED (&nr, slice->slice_qp_delta, -87, 77); + + if (GST_H264_IS_SP_SLICE (slice) || GST_H264_IS_SI_SLICE (slice)) { + guint8 sp_for_switch_flag; + + if (GST_H264_IS_SP_SLICE (slice)) + READ_UINT8 (&nr, sp_for_switch_flag, 1); + READ_SE_ALLOWED (&nr, slice->slice_qs_delta, -51, 51); + } + + if (pps->deblocking_filter_control_present_flag) { + READ_UE_ALLOWED (&nr, slice->disable_deblocking_filter_idc, 0, 2); + if (slice->disable_deblocking_filter_idc != 1) { + READ_SE_ALLOWED (&nr, slice->slice_alpha_c0_offset_div2, -6, 6); + READ_SE_ALLOWED (&nr, slice->slice_beta_offset_div2, -6, 6); + } + } + + if (pps->num_slice_groups_minus1 > 0 && + pps->slice_group_map_type >= 3 && pps->slice_group_map_type <= 5) { + /* Ceil(Log2(PicSizeInMapUnits / SliceGroupChangeRate + 1)) [7-33] */ + guint32 PicWidthInMbs = sps->pic_width_in_mbs_minus1 + 1; + guint32 PicHeightInMapUnits = sps->pic_height_in_map_units_minus1 + 1; + guint32 PicSizeInMapUnits = PicWidthInMbs * PicHeightInMapUnits; + guint32 SliceGroupChangeRate = pps->slice_group_change_rate_minus1 + 1; + const guint n = ceil_log2 (PicSizeInMapUnits / SliceGroupChangeRate + 1); + READ_UINT16 (&nr, slice->slice_group_change_cycle, n); + } + + slice->header_size = nal_reader_get_pos (&nr); + + return GST_H264_PARSER_OK; + +error: + GST_WARNING ("error parsing \"Slice header\""); + return GST_H264_PARSER_ERROR; +} + +/** + * gst_h264_parser_parse_sei: + * @nalparser: a #GstH264NalParser + * @nalu: The #GST_H264_NAL_SEI #GstH264NalUnit to parse + * @sei: The #GstH264SEIMessage to fill. + * + * Parses @data, and fills the @sei structures. 
+ * + * Returns: a #GstH264ParserResult + */ +GstH264ParserResult +gst_h264_parser_parse_sei (GstH264NalParser * nalparser, GstH264NalUnit * nalu, + GstH264SEIMessage * sei) +{ + NalReader nr; + + guint32 payloadSize; + guint8 payload_type_byte, payload_size_byte; + guint remaining, payload_size; + gboolean res; + + GST_DEBUG ("parsing \"Sei message\""); + + nal_reader_init (&nr, nalu->data + nalu->offset + 1, nalu->size - 1); + + /* init */ + memset (sei, 0, sizeof (*sei)); + + sei->payloadType = 0; + do { + READ_UINT8 (&nr, payload_type_byte, 8); + sei->payloadType += payload_type_byte; + } while (payload_type_byte == 0xff); + + payloadSize = 0; + do { + READ_UINT8 (&nr, payload_size_byte, 8); + payloadSize += payload_size_byte; + } + while (payload_size_byte == 0xff); + + remaining = nal_reader_get_remaining (&nr) * 8; + payload_size = payloadSize < remaining ? payloadSize : remaining; + + GST_DEBUG ("SEI message received: payloadType %u, payloadSize = %u bytes", + sei->payloadType, payload_size); + + if (sei->payloadType == GST_H264_SEI_BUF_PERIOD) { + /* size not set; might depend on emulation_prevention_three_byte */ + res = gst_h264_parser_parse_buffering_period (nalparser, + &sei->buffering_period, &nr); + } else if (sei->payloadType == GST_H264_SEI_PIC_TIMING) { + /* size not set; might depend on emulation_prevention_three_byte */ + res = gst_h264_parser_parse_pic_timing (nalparser, &sei->pic_timing, &nr); + } else + res = GST_H264_PARSER_OK; + + return res; + +error: + GST_WARNING ("error parsing \"Sei message\""); + return GST_H264_PARSER_ERROR; +} diff --git a/gst-libs/gst/codecparsers/gsth264parser.h b/gst-libs/gst/codecparsers/gsth264parser.h new file mode 100644 index 0000000..3c22156 --- /dev/null +++ b/gst-libs/gst/codecparsers/gsth264parser.h @@ -0,0 +1,716 @@ +/* Gstreamer + * Copyright (C) <2011> Intel Corporation + * Copyright (C) <2011> Collabora Ltd. 
+ * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> + * + * Some bits C-c,C-v'ed and s/4/3 from h264parse and videoparsers/h264parse.c: + * Copyright (C) <2010> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> + * Copyright (C) <2010> Collabora Multimedia + * Copyright (C) <2010> Nokia Corporation + * + * (C) 2005 Michal Benes <michal.benes@itonis.tv> + * (C) 2008 Wim Taymans <wim.taymans@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef __GST_H264_PARSER_H__ +#define __GST_H264_PARSER_H__ + +#ifndef GST_USE_UNSTABLE_API +#warning "The H.264 parsing library is unstable API and may change in future." +#warning "You can define GST_USE_UNSTABLE_API to avoid this warning." 
+#endif + +#include <gst/gst.h> + +G_BEGIN_DECLS + +#define GST_H264_MAX_SPS_COUNT 32 +#define GST_H264_MAX_PPS_COUNT 256 + +#define GST_H264_IS_P_SLICE(slice) (((slice)->type % 5) == GST_H264_P_SLICE) +#define GST_H264_IS_B_SLICE(slice) (((slice)->type % 5) == GST_H264_B_SLICE) +#define GST_H264_IS_I_SLICE(slice) (((slice)->type % 5) == GST_H264_I_SLICE) +#define GST_H264_IS_SP_SLICE(slice) (((slice)->type % 5) == GST_H264_SP_SLICE) +#define GST_H264_IS_SI_SLICE(slice) (((slice)->type % 5) == GST_H264_SI_SLICE) + +/** + * GstH264NalUnitType: + * @GST_H264_NAL_UNKNOWN: Unknown nal type + * @GST_H264_NAL_SLICE: Slice nal + * @GST_H264_NAL_SLICE_DPA: DPA slice nal + * @GST_H264_NAL_SLICE_DPB: DPB slice nal + * @GST_H264_NAL_SLICE_DPC: DPC slice nal + * @GST_H264_NAL_SLICE_IDR: DPR slice nal + * @GST_H264_NAL_SEI: Supplemental enhancement information (SEI) nal unit + * @GST_H264_NAL_SPS: Sequence parameter set (SPS) nal unit + * @GST_H264_NAL_PPS: Picture parameter set (PPS) nal unit + * @GST_H264_NAL_AU_DELIMITER: Access unit (AU) delimiter nal unit + * @GST_H264_NAL_SEQ_END: End of sequence nal unit + * @GST_H264_NAL_STREAM_END: End of stream nal unit + * @GST_H264_NAL_FILLER_DATA: Filler data nal lunit + * + * Indicates the type of H264 Nal Units + */ +typedef enum +{ + GST_H264_NAL_UNKNOWN = 0, + GST_H264_NAL_SLICE = 1, + GST_H264_NAL_SLICE_DPA = 2, + GST_H264_NAL_SLICE_DPB = 3, + GST_H264_NAL_SLICE_DPC = 4, + GST_H264_NAL_SLICE_IDR = 5, + GST_H264_NAL_SEI = 6, + GST_H264_NAL_SPS = 7, + GST_H264_NAL_PPS = 8, + GST_H264_NAL_AU_DELIMITER = 9, + GST_H264_NAL_SEQ_END = 10, + GST_H264_NAL_STREAM_END = 11, + GST_H264_NAL_FILLER_DATA = 12 +} GstH264NalUnitType; + +/** + * GstH264ParserResult: + * @GST_H264_PARSER_OK: The parsing succeded + * @GST_H264_PARSER_BROKEN_DATA: The data to parse is broken + * @GST_H264_PARSER_BROKEN_LINK: The link to structure needed for the parsing couldn't be found + * @GST_H264_PARSER_ERROR: An error accured when parsing + * 
@GST_H264_PARSER_NO_NAL: No nal found during the parsing + * @GST_H264_PARSER_NO_NAL_END: Start of the nal found, but not the end. + * + * The result of parsing H264 data. + */ +typedef enum +{ + GST_H264_PARSER_OK, + GST_H264_PARSER_BROKEN_DATA, + GST_H264_PARSER_BROKEN_LINK, + GST_H264_PARSER_ERROR, + GST_H264_PARSER_NO_NAL, + GST_H264_PARSER_NO_NAL_END +} GstH264ParserResult; + +/** + * GstH264SEIPayloadType: + * @GST_H264_SEI_BUF_PERIOD: Buffering Period SEI Message + * @GST_H264_SEI_PIC_TIMING: Picture Timing SEI Message + * ... + * + * The type of SEI message. + */ +typedef enum +{ + GST_H264_SEI_BUF_PERIOD = 0, + GST_H264_SEI_PIC_TIMING = 1 + /* and more... */ +} GstH264SEIPayloadType; + +/** + * GstH264SEIPicStructType: + * @GST_H264_SEI_PIC_STRUCT_FRAME: Picture is a frame + * @GST_H264_SEI_PIC_STRUCT_TOP_FIELD: Top field of frame + * @GST_H264_SEI_PIC_STRUCT_BOTTOM_FIELD: Botom field of frame + * @GST_H264_SEI_PIC_STRUCT_TOP_BOTTOM: Top bottom field of frame + * @GST_H264_SEI_PIC_STRUCT_BOTTOM_TOP: bottom top field of frame + * @GST_H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP: top bottom top field of frame + * @GST_H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: bottom top bottom field of frame + * @GST_H264_SEI_PIC_STRUCT_FRAME_DOUBLING: indicates that the frame should + * be displayed two times consecutively + * @GST_H264_SEI_PIC_STRUCT_FRAME_TRIPLING: indicates that the frame should be + * displayed three times consecutively + * + * SEI pic_struct type + */ +typedef enum +{ + GST_H264_SEI_PIC_STRUCT_FRAME = 0, + GST_H264_SEI_PIC_STRUCT_TOP_FIELD = 1, + GST_H264_SEI_PIC_STRUCT_BOTTOM_FIELD = 2, + GST_H264_SEI_PIC_STRUCT_TOP_BOTTOM = 3, + GST_H264_SEI_PIC_STRUCT_BOTTOM_TOP = 4, + GST_H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP = 5, + GST_H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM = 6, + GST_H264_SEI_PIC_STRUCT_FRAME_DOUBLING = 7, + GST_H264_SEI_PIC_STRUCT_FRAME_TRIPLING = 8 +} GstH264SEIPicStructType; + +/** + * GstH264SliceType: + * + * Type of Picture slice + */ + +typedef enum +{ + 
GST_H264_P_SLICE = 0, + GST_H264_B_SLICE = 1, + GST_H264_I_SLICE = 2, + GST_H264_SP_SLICE = 3, + GST_H264_SI_SLICE = 4, + GST_H264_S_P_SLICE = 5, + GST_H264_S_B_SLICE = 6, + GST_H264_S_I_SLICE = 7, + GST_H264_S_SP_SLICE = 8, + GST_H264_S_SI_SLICE = 9 +} GstH264SliceType; + +typedef struct _GstH264NalParser GstH264NalParser; + +typedef struct _GstH264NalUnit GstH264NalUnit; + +typedef struct _GstH264SPS GstH264SPS; +typedef struct _GstH264PPS GstH264PPS; +typedef struct _GstH264HRDParams GstH264HRDParams; +typedef struct _GstH264VUIParams GstH264VUIParams; + +typedef struct _GstH264RefPicListModification GstH264RefPicListModification; +typedef struct _GstH264DecRefPicMarking GstH264DecRefPicMarking; +typedef struct _GstH264RefPicMarking GstH264RefPicMarking; +typedef struct _GstH264PredWeightTable GstH264PredWeightTable; +typedef struct _GstH264SliceHdr GstH264SliceHdr; + +typedef struct _GstH264ClockTimestamp GstH264ClockTimestamp; +typedef struct _GstH264PicTiming GstH264PicTiming; +typedef struct _GstH264BufferingPeriod GstH264BufferingPeriod; +typedef struct _GstH264SEIMessage GstH264SEIMessage; + +/** + * GstH264NalUnit: + * @ref_idc: not equal to 0 specifies that the content of the NAL unit contains a sequence + * parameter set, a sequence * parameter set extension, a subset sequence parameter set, a + * picture parameter set, a slice of a reference picture, a slice data partition of a + * reference picture, or a prefix NAL unit preceding a slice of a reference picture. 
+ * @type: A #GstH264NalUnitType + * @idr_pic_flag: calculated idr_pic_flag + * @size: The size of the nal unit starting from @offset + * @offset: The offset of the actual start of the nal unit + * @sc_offset:The offset of the start code of the nal unit + * @valid: If the nal unit is valid, which mean it has + * already been parsed + * @data: The data from which the Nalu has been parsed + * + * Structure defining the Nal unit headers + */ +struct _GstH264NalUnit +{ + guint16 ref_idc; + guint16 type; + + /* calculated values */ + guint8 idr_pic_flag; + guint size; + guint offset; + guint sc_offset; + gboolean valid; + + guint8 *data; +}; + +/** + * GstH264HRDParams: + * @cpb_cnt_minus1: plus 1 specifies the number of alternative + * CPB specifications in the bitstream + * @bit_rate_scale: specifies the maximum input bit rate of the + * SchedSelIdx-th CPB + * @cpb_size_scale: specifies the CPB size of the SchedSelIdx-th CPB + * @guint32 bit_rate_value_minus1: specifies the maximum input bit rate for the + * SchedSelIdx-th CPB + * @cpb_size_value_minus1: is used together with cpb_size_scale to specify the + * SchedSelIdx-th CPB size + * @cbr_flag: Specifies if running in itermediate bitrate mode or constant + * @initial_cpb_removal_delay_length_minus1: specifies the length in bits of + * the cpb_removal_delay syntax element + * @cpb_removal_delay_length_minus1: specifies the length in bits of the + * dpb_output_delay syntax element + * @dpb_output_delay_length_minus1: >0 specifies the length in bits of the time_offset syntax element. 
+ * =0 specifies that the time_offset syntax element is not present + * @time_offset_length: Length of the time offset + * + * Defines the HRD parameters + */ +struct _GstH264HRDParams +{ + guint8 cpb_cnt_minus1; + guint8 bit_rate_scale; + guint8 cpb_size_scale; + + guint32 bit_rate_value_minus1[32]; + guint32 cpb_size_value_minus1[32]; + guint8 cbr_flag[32]; + + guint8 initial_cpb_removal_delay_length_minus1; + guint8 cpb_removal_delay_length_minus1; + guint8 dpb_output_delay_length_minus1; + guint8 time_offset_length; +}; + +/** + * GstH264VUIParams: + * @aspect_ratio_info_present_flag: %TRUE specifies that aspect_ratio_idc is present. + * %FALSE specifies that aspect_ratio_idc is not present + * @aspect_ratio_idc specifies the value of the sample aspect ratio of the luma samples + * @sar_width indicates the horizontal size of the sample aspect ratio + * @sar_height indicates the vertical size of the sample aspect ratio + * @overscan_info_present_flag: %TRUE overscan_appropriate_flag is present %FALSE otherwize + * @overscan_appropriate_flag: %TRUE indicates that the cropped decoded pictures + * output are suitable for display using overscan. %FALSE the cropped decoded pictures + * output contain visually important information + * @video_signal_type_present_flag: %TRUE specifies that video_format, video_full_range_flag and + * colour_description_present_flag are present. 
+ * @video_format: indicates the representation of the picture + * @video_full_range_flag: indicates the black level and range of the luma and chroma signals + * @colour_description_present_flag: %TRUE specifies that colour_primaries, + * transfer_characteristics and matrix_coefficients are present + * @colour_primaries: indicates the chromaticity coordinates of the source primaries + * @transfer_characteristics: indicates the opto-electronic transfer characteristic + * @matrix_coefficients: describes the matrix coefficients used in deriving luma and chroma signals + * @chroma_loc_info_present_flag: %TRUE specifies that chroma_sample_loc_type_top_field and + * chroma_sample_loc_type_bottom_field are present, %FALSE otherwize + * @chroma_sample_loc_type_top_field: specify the location of chroma for top field + * @chroma_sample_loc_type_bottom_field specify the location of chroma for bottom field + * @timing_info_present_flag: %TRUE specifies that num_units_in_tick, + * time_scale and fixed_frame_rate_flag are present in the bitstream + * @num_units_in_tick: is the number of time units of a clock operating at the frequency time_scale Hz + * time_scale: is the number of time units that pass in one second + * @fixed_frame_rate_flag: %TRUE indicates that the temporal distance between the HRD output times + * of any two consecutive pictures in output order is constrained as specified in the spec, %FALSE + * otherwize. 
+ * @nal_hrd_parameters_present_flag: %TRUE if nal hrd parameters present in the bitstream + * @vcl_hrd_parameters_present_flag: %TRUE if nal vlc hrd parameters present in the bitstream + * @low_delay_hrd_flag: specifies the HRD operational mode + * @pic_struct_present_flag: %TRUE specifies that picture timing SEI messages are present or not + * @bitstream_restriction_flag: %TRUE specifies that the following coded video sequence bitstream restriction + * parameters are present + * @motion_vectors_over_pic_boundaries_flag: %FALSE indicates that no sample outside the + * picture boundaries and no sample at a fractional sample position, %TRUE indicates that one or more + * samples outside picture boundaries may be used in inter prediction + * @max_bytes_per_pic_denom: indicates a number of bytes not exceeded by the sum of the sizes of + * the VCL NAL units associated with any coded picture in the coded video sequence. + * @max_bits_per_mb_denom: indicates the maximum number of coded bits of macroblock_layer + * @log2_max_mv_length_horizontal: indicate the maximum absolute value of a decoded horizontal + * motion vector component + * @log2_max_mv_length_vertical: indicate the maximum absolute value of a decoded vertical + * motion vector component + * @num_reorder_frames: indicates the maximum number of frames, complementary field pairs, + * or non-paired fields that precede any frame, + * @max_dec_frame_buffering: specifies the required size of the HRD decoded picture buffer in + * units of frame buffers. + * + * The structure representing the VUI parameters. 
+ */ +struct _GstH264VUIParams +{ + guint8 aspect_ratio_info_present_flag; + guint8 aspect_ratio_idc; + /* if aspect_ratio_idc == 255 */ + guint16 sar_width; + guint16 sar_height; + + guint8 overscan_info_present_flag; + /* if overscan_info_present_flag */ + guint8 overscan_appropriate_flag; + + guint8 video_signal_type_present_flag; + guint8 video_format; + guint8 video_full_range_flag; + guint8 colour_description_present_flag; + guint8 colour_primaries; + guint8 transfer_characteristics; + guint8 matrix_coefficients; + + guint8 chroma_loc_info_present_flag; + guint8 chroma_sample_loc_type_top_field; + guint8 chroma_sample_loc_type_bottom_field; + + guint8 timing_info_present_flag; + /* if timing_info_present_flag */ + guint32 num_units_in_tick; + guint32 time_scale; + guint8 fixed_frame_rate_flag; + + guint8 nal_hrd_parameters_present_flag; + /* if nal_hrd_parameters_present_flag */ + GstH264HRDParams nal_hrd_parameters; + + guint8 vcl_hrd_parameters_present_flag; + /* if nal_hrd_parameters_present_flag */ + GstH264HRDParams vcl_hrd_parameters; + + guint8 low_delay_hrd_flag; + guint8 pic_struct_present_flag; + + guint8 bitstream_restriction_flag; + /* if bitstream_restriction_flag */ + guint8 motion_vectors_over_pic_boundaries_flag; + guint32 max_bytes_per_pic_denom; + guint32 max_bits_per_mb_denom; + guint32 log2_max_mv_length_horizontal; + guint32 log2_max_mv_length_vertical; + guint32 num_reorder_frames; + guint32 max_dec_frame_buffering; +}; + +/** + * GstH264SPS: + * @id: The ID of the sequence parameter set + * @profile_idc: indicate the profile to which the coded video sequence conforms + * + * H264 Sequence Parameter Set (SPS) + */ +struct _GstH264SPS +{ + gint id; + + guint8 profile_idc; + guint8 constraint_set0_flag; + guint8 constraint_set1_flag; + guint8 constraint_set2_flag; + guint8 constraint_set3_flag; + guint8 level_idc; + + guint8 chroma_format_idc; + guint8 separate_colour_plane_flag; + guint8 bit_depth_luma_minus8; + guint8 
bit_depth_chroma_minus8; + guint8 qpprime_y_zero_transform_bypass_flag; + + guint8 scaling_matrix_present_flag; + guint8 scaling_lists_4x4[6][16]; + guint8 scaling_lists_8x8[6][64]; + + guint8 log2_max_frame_num_minus4; + guint8 pic_order_cnt_type; + + /* if pic_order_cnt_type == 0 */ + guint8 log2_max_pic_order_cnt_lsb_minus4; + + /* else if pic_order_cnt_type == 1 */ + guint8 delta_pic_order_always_zero_flag; + gint32 offset_for_non_ref_pic; + gint32 offset_for_top_to_bottom_field; + guint8 num_ref_frames_in_pic_order_cnt_cycle; + gint32 offset_for_ref_frame[255]; + + guint32 num_ref_frames; + guint8 gaps_in_frame_num_value_allowed_flag; + guint32 pic_width_in_mbs_minus1; + guint32 pic_height_in_map_units_minus1; + guint8 frame_mbs_only_flag; + + guint8 mb_adaptive_frame_field_flag; + + guint8 direct_8x8_inference_flag; + + guint8 frame_cropping_flag; + + /* if frame_cropping_flag */ + guint32 frame_crop_left_offset; + guint32 frame_crop_right_offset; + guint32 frame_crop_top_offset; + guint32 frame_crop_bottom_offset; + + guint8 vui_parameters_present_flag; + /* if vui_parameters_present_flag */ + GstH264VUIParams vui_parameters; + + /* calculated values */ + guint8 chroma_array_type; + guint32 max_frame_num; + gint width, height; + gint fps_num, fps_den; + gboolean valid; +}; + +/** + * GstH264PPS: + * + * H264 Picture Parameter Set + */ +struct _GstH264PPS +{ + gint id; + + GstH264SPS *sequence; + + guint8 entropy_coding_mode_flag; + guint8 pic_order_present_flag; + + guint32 num_slice_groups_minus1; + + /* if num_slice_groups_minus1 > 0 */ + guint8 slice_group_map_type; + /* and if slice_group_map_type == 0 */ + guint32 run_length_minus1[8]; + /* or if slice_group_map_type == 2 */ + guint32 top_left[8]; + guint32 bottom_right[8]; + /* or if slice_group_map_type == (3, 4, 5) */ + guint8 slice_group_change_direction_flag; + guint32 slice_group_change_rate_minus1; + /* or if slice_group_map_type == 6 */ + guint32 pic_size_in_map_units_minus1; + guint8 
*slice_group_id; + + guint8 num_ref_idx_l0_active_minus1; + guint8 num_ref_idx_l1_active_minus1; + guint8 weighted_pred_flag; + guint8 weighted_bipred_idc; + gint8 pic_init_qp_minus26; + gint8 pic_init_qs_minus26; + gint8 chroma_qp_index_offset; + guint8 deblocking_filter_control_present_flag; + guint8 constrained_intra_pred_flag; + guint8 redundant_pic_cnt_present_flag; + + guint8 transform_8x8_mode_flag; + + guint8 scaling_lists_4x4[6][16]; + guint8 scaling_lists_8x8[6][64]; + + guint8 second_chroma_qp_index_offset; + + gboolean valid; +}; + +struct _GstH264RefPicListModification +{ + guint8 modification_of_pic_nums_idc; + union + { + /* if modification_of_pic_nums_idc == 0 || 1 */ + guint32 abs_diff_pic_num_minus1; + /* if modification_of_pic_nums_idc == 2 */ + guint32 long_term_pic_num; + } value; +}; + +struct _GstH264PredWeightTable +{ + guint8 luma_log2_weight_denom; + guint8 chroma_log2_weight_denom; + + gint16 luma_weight_l0[32]; + gint8 luma_offset_l0[32]; + + /* if seq->ChromaArrayType != 0 */ + gint16 chroma_weight_l0[32][2]; + gint8 chroma_offset_l0[32][2]; + + /* if slice->slice_type % 5 == 1 */ + gint16 luma_weight_l1[32]; + gint8 luma_offset_l1[32]; + + /* and if seq->ChromaArrayType != 0 */ + gint16 chroma_weight_l1[32][2]; + gint8 chroma_offset_l1[32][2]; +}; + +struct _GstH264RefPicMarking +{ + guint8 memory_management_control_operation; + + guint32 difference_of_pic_nums_minus1; + guint32 long_term_pic_num; + guint32 long_term_frame_idx; + guint32 max_long_term_frame_idx_plus1; +}; + +struct _GstH264DecRefPicMarking +{ + /* if slice->nal_unit.IdrPicFlag */ + guint8 no_output_of_prior_pics_flag; + guint8 long_term_reference_flag; + + guint8 adaptive_ref_pic_marking_mode_flag; + GstH264RefPicMarking ref_pic_marking[10]; + guint8 n_ref_pic_marking; +}; + + +struct _GstH264SliceHdr +{ + guint32 first_mb_in_slice; + guint32 type; + GstH264PPS *pps; + + /* if seq->separate_colour_plane_flag */ + guint8 colour_plane_id; + + guint16 frame_num; + + 
guint8 field_pic_flag; + guint8 bottom_field_flag; + + /* if nal_unit.type == 5 */ + guint16 idr_pic_id; + + /* if seq->pic_order_cnt_type == 0 */ + guint16 pic_order_cnt_lsb; + /* if seq->pic_order_present_flag && !field_pic_flag */ + gint32 delta_pic_order_cnt_bottom; + + gint32 delta_pic_order_cnt[2]; + guint8 redundant_pic_cnt; + + /* if slice_type == B_SLICE */ + guint8 direct_spatial_mv_pred_flag; + + guint8 num_ref_idx_l0_active_minus1; + guint8 num_ref_idx_l1_active_minus1; + + guint8 ref_pic_list_modification_flag_l0; + guint8 n_ref_pic_list_modification_l0; + GstH264RefPicListModification ref_pic_list_modification_l0[32]; + guint8 ref_pic_list_modification_flag_l1; + guint8 n_ref_pic_list_modification_l1; + GstH264RefPicListModification ref_pic_list_modification_l1[32]; + + GstH264PredWeightTable pred_weight_table; + /* if nal_unit.ref_idc != 0 */ + GstH264DecRefPicMarking dec_ref_pic_marking; + + guint8 cabac_init_idc; + gint8 slice_qp_delta; + gint8 slice_qs_delta; + + guint8 disable_deblocking_filter_idc; + gint8 slice_alpha_c0_offset_div2; + gint8 slice_beta_offset_div2; + + guint16 slice_group_change_cycle; + + /* calculated values */ + guint32 max_pic_num; + gboolean valid; + + /* Size of the slice_header() in bits */ + guint header_size; +}; + + +struct _GstH264ClockTimestamp +{ + guint8 ct_type; + guint8 nuit_field_based_flag; + guint8 counting_type; + guint8 discontinuity_flag; + guint8 cnt_dropped_flag; + guint8 n_frames; + + guint8 seconds_flag; + guint8 seconds_value; + + guint8 minutes_flag; + guint8 minutes_value; + + guint8 hours_flag; + guint8 hours_value; + + guint32 time_offset; +}; + +struct _GstH264PicTiming +{ + guint32 cpb_removal_delay; + guint32 dpb_output_delay; + + guint8 pic_struct_present_flag; + /* if pic_struct_present_flag */ + guint8 pic_struct; + + guint8 clock_timestamp_flag[3]; + GstH264ClockTimestamp clock_timestamp[3]; +}; + +struct _GstH264BufferingPeriod +{ + GstH264SPS *sps; + + /* 
seq->vui_parameters->nal_hrd_parameters_present_flag */ + guint8 nal_initial_cpb_removal_delay[32]; + guint8 nal_initial_cpb_removal_delay_offset[32]; + + /* seq->vui_parameters->vcl_hrd_parameters_present_flag */ + guint8 vcl_initial_cpb_removal_delay[32]; + guint8 vcl_initial_cpb_removal_delay_offset[32]; +}; + +struct _GstH264SEIMessage +{ + GstH264SEIPayloadType payloadType; + + union { + GstH264BufferingPeriod buffering_period; + GstH264PicTiming pic_timing; + /* ... could implement more */ + }; +}; + +/** + * GstH264NalParser: + * + * H264 NAL Parser (opaque structure). + */ +struct _GstH264NalParser +{ + /*< private >*/ + GstH264SPS sps[GST_H264_MAX_SPS_COUNT]; + GstH264PPS pps[GST_H264_MAX_PPS_COUNT]; + GstH264SPS *last_sps; + GstH264PPS *last_pps; +}; + +GstH264NalParser *gst_h264_nal_parser_new (void); + +GstH264ParserResult gst_h264_parser_identify_nalu (GstH264NalParser *nalparser, + const guint8 *data, guint offset, + gsize size, GstH264NalUnit *nalu); + +GstH264ParserResult gst_h264_parser_identify_nalu_unchecked (GstH264NalParser *nalparser, + const guint8 *data, guint offset, + gsize size, GstH264NalUnit *nalu); + +GstH264ParserResult gst_h264_parser_identify_nalu_avc (GstH264NalParser *nalparser, const guint8 *data, + guint offset, gsize size, guint8 nal_length_size, + GstH264NalUnit *nalu); + +GstH264ParserResult gst_h264_parser_parse_nal (GstH264NalParser *nalparser, + GstH264NalUnit *nalu); + +GstH264ParserResult gst_h264_parser_parse_slice_hdr (GstH264NalParser *nalparser, GstH264NalUnit *nalu, + GstH264SliceHdr *slice, gboolean parse_pred_weight_table, + gboolean parse_dec_ref_pic_marking); + +GstH264ParserResult gst_h264_parser_parse_sps (GstH264NalParser *nalparser, GstH264NalUnit *nalu, + GstH264SPS *sps, gboolean parse_vui_params); + +GstH264ParserResult gst_h264_parser_parse_pps (GstH264NalParser *nalparser, + GstH264NalUnit *nalu, GstH264PPS *pps); + +GstH264ParserResult gst_h264_parser_parse_sei (GstH264NalParser *nalparser, + 
GstH264NalUnit *nalu, GstH264SEIMessage *sei); + +void gst_h264_nal_parser_free (GstH264NalParser *nalparser); + +GstH264ParserResult gst_h264_parse_sps (GstH264NalUnit *nalu, + GstH264SPS *sps, gboolean parse_vui_params); + +GstH264ParserResult gst_h264_parse_pps (GstH264NalParser *nalparser, + GstH264NalUnit *nalu, GstH264PPS *pps); + +G_END_DECLS +#endif diff --git a/gst-libs/gst/codecparsers/gstvc1parser.c b/gst-libs/gst/codecparsers/gstvc1parser.c new file mode 100644 index 0000000..8f2937f --- /dev/null +++ b/gst-libs/gst/codecparsers/gstvc1parser.c @@ -0,0 +1,2132 @@ +/* Gstreamer + * Copyright (C) <2011> Intel + * Copyright (C) <2011> Collabora Ltd. + * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ +/** + * SECTION:gstvc1parser + * @short_description: Convenience library for parsing vc1 video + * bitstream. + * + * For more details about the structures, look at the + * smpte specifications (S421m-2006.pdf). 
+ * + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "gstvc1parser.h" +#include "parserutils.h" +#include <gst/base/gstbytereader.h> +#include <gst/base/gstbitreader.h> +#include <string.h> + +#ifndef GST_DISABLE_GST_DEBUG + +#define GST_CAT_DEFAULT ensure_debug_category() + +static GstDebugCategory * +ensure_debug_category (void) +{ + static gsize cat_gonce = 0; + + if (g_once_init_enter (&cat_gonce)) { + gsize cat_done; + + cat_done = (gsize) _gst_debug_category_new ("codecparsers_vc1", 0, + "VC1 codec parsing library"); + + g_once_init_leave (&cat_gonce, cat_done); + } + + return (GstDebugCategory *) cat_gonce; +} + +#else + +#define ensure_debug_category() /* NOOP */ + +#endif /* GST_DISABLE_GST_DEBUG */ + +static const guint8 vc1_pquant_table[3][32] = { + { /* Implicit quantizer */ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31}, + { /* Explicit quantizer, pquantizer uniform */ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + { /* Explicit quantizer, pquantizer non-uniform */ + 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31} +}; + +static const guint8 vc1_mvmode_table[2][5] = { + /* Table 47: P Picture High rate (PQUANT <= 12) MVMODE code table */ + { + GST_VC1_MVMODE_1MV, + GST_VC1_MVMODE_MIXED_MV, + GST_VC1_MVMODE_1MV_HPEL, + GST_VC1_MVMODE_INTENSITY_COMP, + GST_VC1_MVMODE_1MV_HPEL_BILINEAR}, + /* Table 46: P Picture Low rate (PQUANT > 12) MVMODE code table */ + { + GST_VC1_MVMODE_1MV_HPEL_BILINEAR, + GST_VC1_MVMODE_1MV, + GST_VC1_MVMODE_1MV_HPEL, + GST_VC1_MVMODE_INTENSITY_COMP, + GST_VC1_MVMODE_MIXED_MV} +}; + +static const guint8 vc1_mvmode2_table[2][4] = { + /* Table 50: P Picture High rate (PQUANT <= 12) MVMODE2 code table */ + { + GST_VC1_MVMODE_1MV, + GST_VC1_MVMODE_MIXED_MV, + GST_VC1_MVMODE_1MV_HPEL, + 
GST_VC1_MVMODE_1MV_HPEL_BILINEAR}, + /* Table 49: P Picture Low rate (PQUANT > 12) MVMODE2 code table */ + { + GST_VC1_MVMODE_1MV_HPEL_BILINEAR, + GST_VC1_MVMODE_1MV, + GST_VC1_MVMODE_1MV_HPEL, + GST_VC1_MVMODE_MIXED_MV} +}; + +/* Table 40: BFRACTION VLC Table */ +static const VLCTable vc1_bfraction_vlc_table[] = { + {GST_VC1_BFRACTION_BASIS / 2, 0x00, 3}, + {GST_VC1_BFRACTION_BASIS / 3, 0x01, 3}, + {(GST_VC1_BFRACTION_BASIS * 2) / 3, 0x02, 3}, + {GST_VC1_BFRACTION_BASIS / 4, 0x02, 3}, + {(GST_VC1_BFRACTION_BASIS * 3) / 4, 0x04, 3}, + {GST_VC1_BFRACTION_BASIS / 5, 0x05, 3}, + {(GST_VC1_BFRACTION_BASIS * 2) / 5, 0x06, 3}, + {(GST_VC1_BFRACTION_BASIS * 3) / 5, 0x70, 7}, + {(GST_VC1_BFRACTION_BASIS * 4) / 5, 0x71, 7}, + {GST_VC1_BFRACTION_BASIS / 6, 0x72, 7}, + {(GST_VC1_BFRACTION_BASIS * 5) / 6, 0x73, 7}, + {GST_VC1_BFRACTION_BASIS / 7, 0x74, 7}, + {(GST_VC1_BFRACTION_BASIS * 2) / 7, 0x75, 7}, + {(GST_VC1_BFRACTION_BASIS * 3) / 7, 0x76, 7}, + {(GST_VC1_BFRACTION_BASIS * 4) / 7, 0x77, 7}, + {(GST_VC1_BFRACTION_BASIS * 5) / 7, 0x78, 7}, + {(GST_VC1_BFRACTION_BASIS * 6) / 7, 0x79, 7}, + {GST_VC1_BFRACTION_BASIS / 8, 0x7a, 7}, + {(GST_VC1_BFRACTION_BASIS * 3) / 8, 0x7b, 7}, + {(GST_VC1_BFRACTION_BASIS * 5) / 8, 0x7c, 7}, + {(GST_VC1_BFRACTION_BASIS * 7) / 8, 0x7d, 7}, + {GST_VC1_BFRACTION_RESERVED, 0x7e, 7}, + {GST_VC1_BFRACTION_PTYPE_BI, 0x7f, 7} +}; + +/* Imode types */ +enum +{ + IMODE_RAW, + IMODE_NORM2, + IMODE_DIFF2, + IMODE_NORM6, + IMODE_DIFF6, + IMODE_ROWSKIP, + IMODE_COLSKIP +}; + +/* Table 69: IMODE VLC Codetable */ +static const VLCTable vc1_imode_vlc_table[] = { + {IMODE_NORM2, 0x02, 2}, + {IMODE_NORM6, 0x03, 2}, + {IMODE_ROWSKIP, 0x02, 3}, + {IMODE_COLSKIP, 0x03, 3}, + {IMODE_DIFF2, 0x01, 3}, + {IMODE_DIFF6, 0x01, 4}, + {IMODE_RAW, 0x00, 4} +}; + +/* Table 80: Norm-2/Diff-2 Code Table */ +static const VLCTable vc1_norm2_vlc_table[4] = { + {0, 0, 1}, + {2, 4, 3}, + {1, 5, 3}, + {3, 3, 2} +}; + +/* Table 81: Code table for 3x2 and 2x3 tiles */ +static const 
VLCTable vc1_norm6_vlc_table[64] = { + {0, 1, 1}, + {1, 2, 4}, + {2, 3, 4}, + {3, 0, 8}, + {4, 4, 4}, + {5, 1, 8}, + {6, 2, 8}, + {7, (2 << 5) | 7, 10}, + {8, 5, 4}, + {9, 3, 8}, + {10, 4, 8}, + {11, (2 << 5) | 11, 10}, + {12, 5, 8}, + {13, (2 << 5) | 13, 10}, + {14, (2 << 5) | 14, 10}, + {15, (3 << 8) | 14, 13}, + {16, 6, 4}, + {17, 6, 8}, + {18, 7, 8}, + {19, (2 << 5) | 19, 10}, + {20, 8, 8}, + {21, (2 << 5) | 21, 10}, + {22, (2 << 5) | 22, 10}, + {23, (3 << 8) | 13, 13}, + {24, 9, 8}, + {25, (2 << 5) | 25, 10}, + {26, (2 << 5) | 26, 10}, + {27, (3 << 8) | 12, 13}, + {28, (2 << 5) | 28, 10}, + {29, (3 << 8) | 11, 13}, + {30, (3 << 8) | 10, 13}, + {31, (3 << 4) | 7, 9}, + {32, 7, 4}, + {33, 10, 8}, + {34, 11, 8}, + {35, (2 << 5) | 3, 10}, + {36, 12, 8}, + {37, (2 << 5) | 5, 10}, + {38, (2 << 5) | 6, 10}, + {39, (3 << 8) | 9, 13}, + {40, 13, 8}, + {41, (2 << 5) | 9, 10}, + {42, (2 << 5) | 10, 10}, + {43, (3 << 8) | 8, 13}, + {44, (2 << 5) | 12, 10}, + {45, (3 << 8) | 7, 13}, + {46, (3 << 8) | 6, 13}, + {47, (3 << 4) | 6, 9}, + {48, 14, 8}, + {49, (2 << 5) | 17, 10}, + {50, (2 << 5) | 18, 10}, + {51, (3 << 8) | 5, 13}, + {52, (2 << 5) | 20, 10}, + {53, (3 << 8) | 4, 13}, + {54, (3 << 8) | 3, 13}, + {55, (3 << 4) | 5, 9}, + {56, (2 << 5) | 24, 10}, + {57, (3 << 8) | 2, 13}, + {58, (3 << 8) | 1, 13}, + {59, (3 << 4) | 4, 9}, + {60, (3 << 8) | 0, 13}, + {61, (3 << 4) | 3, 9}, + {62, (3 << 4) | 2, 9}, + {63, (3 << 1) | 1, 6} +}; + +/* SMPTE 421M Table 7 */ +typedef struct +{ + gint par_n, par_d; +} PAR; + +static PAR aspect_ratios[] = { + {0, 0}, + {1, 1}, + {12, 11}, + {10, 11}, + {16, 11}, + {40, 33}, + {24, 11}, + {20, 11}, + {32, 11}, + {80, 33}, + {18, 11}, + {15, 11}, + {64, 33}, + {160, 99}, + {0, 0}, + {0, 0} +}; + +/* SMPTE 421M Table 8 */ +static const guint framerates_n[] = { + 0, + 24 * 1000, + 25 * 1000, + 30 * 1000, + 50 * 1000, + 60 * 1000, + 48 * 1000, + 72 * 1000 +}; + +/* SMPTE 421M Table 9 */ +static const guint framerates_d[] = { + 0, + 1000, + 1001 
+}; + + +static inline gboolean +decode_colskip (GstBitReader * br, guint8 * data, guint width, guint height, + guint stride, guint invert) +{ + guint x, y; + guint8 colskip, v; + + GST_DEBUG ("Parsing colskip"); + + invert &= 1; + for (x = 0; x < width; x++) { + READ_UINT8 (br, colskip, 1); + + if (data) { + if (colskip) { + for (y = 0; y < height; y++) { + READ_UINT8 (br, v, 1); + data[y * stride] = v ^ invert; + } + } else { + for (y = 0; y < height; y++) + data[y * stride] = invert; + } + data++; + } else if (colskip) + SKIP (br, height); + } + + return TRUE; + +failed: + GST_WARNING ("Failed to parse colskip"); + + return FALSE; +} + +static inline gboolean +decode_rowskip (GstBitReader * br, guint8 * data, guint width, guint height, + guint stride, guint invert) +{ + guint x, y; + guint8 rowskip, v; + + GST_DEBUG ("Parsing rowskip"); + + invert &= 1; + for (y = 0; y < height; y++) { + READ_UINT8 (br, rowskip, 1); + + if (data) { + if (!rowskip) + memset (data, invert, width); + else { + for (x = 0; x < width; x++) { + READ_UINT8 (br, v, 1); + data[x] = v ^ invert; + } + } + data += stride; + } else if (rowskip) + SKIP (br, width); + } + + return TRUE; + +failed: + GST_WARNING ("Failed to parse rowskip"); + + return FALSE; +} + +static inline gint8 +decode012 (GstBitReader * br) +{ + guint8 n; + + READ_UINT8 (br, n, 1); + + if (n == 0) + return 0; + + READ_UINT8 (br, n, 1); + + return n + 1; + +failed: + GST_WARNING ("Could not decode 0 1 2 returning -1"); + + return -1; +} + +static inline guint +calculate_nb_pan_scan_win (GstVC1AdvancedSeqHdr * advseqhdr, + GstVC1PicAdvanced * pic) +{ + if (advseqhdr->interlace && !advseqhdr->psf) { + if (advseqhdr->pulldown) + return pic->rff + 2; + + return 2; + + } else { + if (advseqhdr->pulldown) + return pic->rptfrm + 1; + + return 1; + } +} + +static gboolean +decode_refdist (GstBitReader * br, guint16 * value) +{ + guint16 tmp; + gint i = 2; + + if (!gst_bit_reader_peek_bits_uint16 (br, &tmp, i)) + goto failed; + + 
if (tmp < 0x03) { + READ_UINT16 (br, *value, i); + + return TRUE; + } + + do { + i++; + + if (!gst_bit_reader_peek_bits_uint16 (br, &tmp, i)) + goto failed; + + if (!(tmp >> i)) { + READ_UINT16 (br, *value, i); + + return TRUE; + } + } while (i < 16); + + +failed: + { + GST_WARNING ("Could not decode end 0 returning"); + + return FALSE; + } +} + +/*** bitplanes decoding ***/ +static gboolean +bitplane_decoding (GstBitReader * br, guint8 * data, + GstVC1SeqHdr * seqhdr, guint8 * is_raw) +{ + const guint width = seqhdr->mb_width; + const guint height = seqhdr->mb_height; + const guint stride = seqhdr->mb_stride; + guint imode, invert, invert_mask; + guint x, y, v; + guint8 *pdata = data; + + *is_raw = FALSE; + + GET_BITS (br, 1, &invert); + invert_mask = -invert; + + if (!decode_vlc (br, &imode, vc1_imode_vlc_table, + G_N_ELEMENTS (vc1_imode_vlc_table))) + goto failed; + + switch (imode) { + case IMODE_RAW: + + GST_DEBUG ("Parsing IMODE_RAW"); + + *is_raw = TRUE; + return TRUE; + + case IMODE_DIFF2: + invert_mask = 0; + /* fall-through */ + case IMODE_NORM2: + invert_mask &= 3; + + GST_DEBUG ("Parsing IMODE_DIFF2 or IMODE_NORM2 biplane"); + + x = 0; + if ((height * width) & 1) { + GET_BITS (br, 1, &v); + if (pdata) { + *pdata++ = (v ^ invert_mask) & 1; + if (++x == width) { + x = 0; + pdata += stride - width; + } + } + } + + for (y = 0; y < height * width; y += 2) { + if (!decode_vlc (br, &v, vc1_norm2_vlc_table, + G_N_ELEMENTS (vc1_norm2_vlc_table))) + goto failed; + if (pdata) { + v ^= invert_mask; + *pdata++ = v >> 1; + if (++x == width) { + x = 0; + pdata += stride - width; + } + *pdata++ = v & 1; + if (++x == width) { + x = 0; + pdata += stride - width; + } + } + } + break; + + case IMODE_DIFF6: + invert_mask = 0; + /* fall-through */ + case IMODE_NORM6: + + GST_DEBUG ("Parsing IMODE_DIFF6 or IMODE_NORM6 biplane"); + + if (!(height % 3) && (width % 3)) { /* decode 2x3 "vertical" tiles */ + for (y = 0; y < height; y += 3) { + for (x = width & 1; x < width; x += 
2) { + if (!decode_vlc (br, &v, vc1_norm6_vlc_table, + G_N_ELEMENTS (vc1_norm6_vlc_table))) + goto failed; + + if (pdata) { + v ^= invert_mask; + pdata[x + 0] = v & 1; + pdata[x + 1] = (v >> 1) & 1; + pdata[x + 0 + stride] = (v >> 2) & 1; + pdata[x + 1 + stride] = (v >> 3) & 1; + pdata[x + 0 + stride * 2] = (v >> 4) & 1; + pdata[x + 1 + stride * 2] = (v >> 5) & 1; + } + } + if (pdata) + pdata += 3 * stride; + } + + x = width & 1; + y = 0; + } else { /* decode 3x2 "horizontal" tiles */ + + if (pdata) + pdata += (height & 1) * width; + for (y = height & 1; y < height; y += 2) { + for (x = width % 3; x < width; x += 3) { + if (!decode_vlc (br, &v, vc1_norm6_vlc_table, + G_N_ELEMENTS (vc1_norm6_vlc_table))) + goto failed; + + if (pdata) { + v ^= invert_mask; + pdata[x + 0] = v & 1; + pdata[x + 1] = (v >> 1) & 1; + pdata[x + 2] = (v >> 2) & 1; + pdata[x + 0 + stride] = (v >> 3) & 1; + pdata[x + 1 + stride] = (v >> 4) & 1; + pdata[x + 2 + stride] = (v >> 5) & 1; + } + } + if (pdata) + pdata += 2 * stride; + } + + x = width % 3; + y = height & 1; + } + + if (x) { + if (data) + pdata = data + y * stride; + if (!decode_colskip (br, pdata, x, height, stride, invert_mask)) + goto failed; + } + + if (y) { + if (data) + pdata = data + x; + if (!decode_rowskip (br, pdata, width, y, stride, invert_mask)) + goto failed; + } + break; + case IMODE_ROWSKIP: + + GST_DEBUG ("Parsing IMODE_ROWSKIP biplane"); + + if (!decode_rowskip (br, data, width, height, stride, invert_mask)) + goto failed; + break; + case IMODE_COLSKIP: + + GST_DEBUG ("Parsing IMODE_COLSKIP biplane"); + + if (!decode_colskip (br, data, width, height, stride, invert_mask)) + goto failed; + break; + } + + if (!data) + return TRUE; + + /* Applying diff operator */ + if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6) { + pdata = data; + pdata[0] ^= invert; + + for (x = 1; x < width; x++) + pdata[x] ^= pdata[x - 1]; + + for (y = 1; y < height; y++) { + pdata[stride] ^= pdata[0]; + + for (x = 1; x < width; x++) { + if 
(pdata[stride + x - 1] != pdata[x]) + pdata[stride + x] ^= invert; + else + pdata[stride + x] ^= pdata[stride + x - 1]; + } + pdata += stride; + } + } + + return TRUE; + +failed: + GST_WARNING ("Failed to decode bitplane"); + + return FALSE; +} + +static gboolean +parse_vopdquant (GstBitReader * br, GstVC1FrameHdr * framehdr, guint8 dquant) +{ + GstVC1VopDquant *vopdquant = &framehdr->vopdquant; + + GST_DEBUG ("Parsing vopdquant"); + + vopdquant->dqbilevel = 0; + + if (dquant == 2) { + READ_UINT8 (br, vopdquant->dquantfrm, 1); + + READ_UINT8 (br, vopdquant->pqdiff, 3); + + if (vopdquant->pqdiff != 7) + vopdquant->altpquant = framehdr->pquant + vopdquant->pqdiff + 1; + else { + READ_UINT8 (br, vopdquant->abspq, 5); + vopdquant->altpquant = vopdquant->abspq; + } + } else { + READ_UINT8 (br, vopdquant->dquantfrm, 1); + GST_DEBUG (" %u DquantFrm %u", gst_bit_reader_get_pos (br), + vopdquant->dquantfrm); + + if (vopdquant->dquantfrm) { + READ_UINT8 (br, vopdquant->dqprofile, 1); + + switch (vopdquant->dqprofile) { + case GST_VC1_DQPROFILE_SINGLE_EDGE: + case GST_VC1_DQPROFILE_DOUBLE_EDGES: + READ_UINT8 (br, vopdquant->dqsbedge, 2); + break; + + case GST_VC1_DQPROFILE_ALL_MBS: + READ_UINT8 (br, vopdquant->dqbilevel, 1); + break; + } + + if (vopdquant->dqbilevel + || vopdquant->dqprofile != GST_VC1_DQPROFILE_ALL_MBS) { + { + READ_UINT8 (br, vopdquant->pqdiff, 3); + + if (vopdquant->pqdiff == 7) + READ_UINT8 (br, vopdquant->abspq, 5); + } + } + } + } + + return TRUE; + +failed: + GST_WARNING ("Failed to parse vopdquant"); + + return FALSE; +} + +static inline gint +scan_for_start_codes (const guint8 * data, guint size) +{ + GstByteReader br; + gst_byte_reader_init (&br, data, size); + + /* NALU not empty, so we can at least expect 1 (even 2) bytes following sc */ + return gst_byte_reader_masked_scan_uint32 (&br, 0xffffff00, 0x00000100, + 0, size); +} + +static inline gint +get_unary (GstBitReader * br, gint stop, gint len) +{ + int i; + guint8 current = 0xff; + + for (i = 
0; i < len; i++) { + gst_bit_reader_get_bits_uint8 (br, &current, 1); + if (current == stop) + return i; + } + + return i; +} + +static inline void +calculate_framerate_bitrate (guint8 frmrtq_postproc, guint8 bitrtq_postproc, + guint * framerate, guint * bitrate) +{ + if (frmrtq_postproc == 0 && bitrtq_postproc == 31) { + *framerate = 0; + *bitrate = 0; + } else if (frmrtq_postproc == 0 && bitrtq_postproc == 30) { + *framerate = 2; + *bitrate = 1952; + } else if (frmrtq_postproc == 1 && bitrtq_postproc == 31) { + *framerate = 6; + *bitrate = 2016; + } else { + if (frmrtq_postproc == 7) { + *framerate = 30; + } else { + *framerate = 2 + (frmrtq_postproc * 4); + } + if (bitrtq_postproc == 31) { + *bitrate = 2016; + } else { + *bitrate = 32 + (bitrtq_postproc * 64); + } + } +} + +static inline void +calculate_mb_size (GstVC1SeqHdr * seqhdr, guint width, guint height) +{ + seqhdr->mb_width = (width + 15) >> 4; + seqhdr->mb_height = (height + 15) >> 4; + seqhdr->mb_stride = seqhdr->mb_width + 1; +} + +static GstVC1ParserResult +parse_hrd_param_flag (GstBitReader * br, GstVC1HrdParam * hrd_param) +{ + guint i; + + GST_DEBUG ("Parsing Hrd param flag"); + + + if (gst_bit_reader_get_remaining (br) < 13) + goto failed; + + hrd_param->hrd_num_leaky_buckets = + gst_bit_reader_get_bits_uint8_unchecked (br, 5); + hrd_param->bit_rate_exponent = + gst_bit_reader_get_bits_uint8_unchecked (br, 4); + hrd_param->buffer_size_exponent = + gst_bit_reader_get_bits_uint8_unchecked (br, 4); + + if (gst_bit_reader_get_remaining (br) < + (32 * hrd_param->hrd_num_leaky_buckets)) + goto failed; + + for (i = 0; i < hrd_param->hrd_num_leaky_buckets; i++) { + hrd_param->hrd_rate[i] = gst_bit_reader_get_bits_uint16_unchecked (br, 16); + hrd_param->hrd_buffer[i] = + gst_bit_reader_get_bits_uint16_unchecked (br, 16); + } + + return GST_VC1_PARSER_OK; + +failed: + GST_WARNING ("Failed to parse hrd param flag"); + + return GST_VC1_PARSER_ERROR; +} + +static GstVC1ParserResult +parse_sequence_header_advanced 
(GstVC1SeqHdr * seqhdr, GstBitReader * br) +{ + GstVC1AdvancedSeqHdr *advanced = &seqhdr->advanced; + guint8 tmp; + + GST_DEBUG ("Parsing sequence header in advanced mode"); + + READ_UINT8 (br, tmp, 3); + advanced->level = tmp; + advanced->par_n = 0; + advanced->par_d = 0; + advanced->fps_n = 0; + advanced->fps_d = 0; + + READ_UINT8 (br, advanced->colordiff_format, 2); + READ_UINT8 (br, advanced->frmrtq_postproc, 3); + READ_UINT8 (br, advanced->bitrtq_postproc, 5); + + calculate_framerate_bitrate (advanced->frmrtq_postproc, + advanced->bitrtq_postproc, &advanced->framerate, &advanced->bitrate); + + GST_DEBUG ("level %u, colordiff_format %u , frmrtq_postproc %u," + " bitrtq_postproc %u", advanced->level, advanced->colordiff_format, + advanced->frmrtq_postproc, advanced->bitrtq_postproc); + + if (gst_bit_reader_get_remaining (br) < 32) + goto failed; + + advanced->postprocflag = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + advanced->max_coded_width = gst_bit_reader_get_bits_uint16_unchecked (br, 12); + advanced->max_coded_height = + gst_bit_reader_get_bits_uint16_unchecked (br, 12); + advanced->max_coded_width = (advanced->max_coded_width + 1) << 1; + advanced->max_coded_height = (advanced->max_coded_height + 1) << 1; + calculate_mb_size (seqhdr, advanced->max_coded_width, + advanced->max_coded_height); + advanced->pulldown = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + advanced->interlace = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + advanced->tfcntrflag = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + advanced->finterpflag = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + + GST_DEBUG ("postprocflag %u, max_coded_width %u, max_coded_height %u," + "pulldown %u, interlace %u, tfcntrflag %u, finterpflag %u", + advanced->postprocflag, advanced->max_coded_width, + advanced->max_coded_height, advanced->pulldown, + advanced->interlace, advanced->tfcntrflag, advanced->finterpflag); + + /* Skipping reserved bit */ + gst_bit_reader_skip_unchecked 
(br, 1); + + advanced->psf = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + advanced->display_ext = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + if (advanced->display_ext) { + READ_UINT16 (br, advanced->disp_horiz_size, 14); + READ_UINT16 (br, advanced->disp_vert_size, 14); + + advanced->disp_horiz_size++; + advanced->disp_vert_size++; + + READ_UINT8 (br, advanced->aspect_ratio_flag, 1); + + if (advanced->aspect_ratio_flag) { + READ_UINT8 (br, advanced->aspect_ratio, 4); + + if (advanced->aspect_ratio == 15) { + READ_UINT8 (br, advanced->aspect_horiz_size, 8); + READ_UINT8 (br, advanced->aspect_vert_size, 8); + advanced->par_n = advanced->aspect_horiz_size; + advanced->par_d = advanced->aspect_vert_size; + } else { + advanced->par_n = aspect_ratios[advanced->aspect_ratio].par_n; + advanced->par_d = aspect_ratios[advanced->aspect_ratio].par_d; + } + } + READ_UINT8 (br, advanced->framerate_flag, 1); + if (advanced->framerate_flag) { + READ_UINT8 (br, advanced->framerateind, 1); + + if (!advanced->framerateind) { + READ_UINT8 (br, advanced->frameratenr, 8); + READ_UINT8 (br, advanced->frameratedr, 4); + } else { + READ_UINT16 (br, advanced->framerateexp, 16); + } + if (advanced->frameratenr > 0 && + advanced->frameratenr < 8 && + advanced->frameratedr > 0 && advanced->frameratedr < 3) { + advanced->fps_n = framerates_n[advanced->frameratenr]; + advanced->fps_d = framerates_d[advanced->frameratedr]; + } else { + advanced->fps_n = advanced->framerateexp + 1; + advanced->fps_d = 32; + } + } + READ_UINT8 (br, advanced->color_format_flag, 1); + + if (advanced->color_format_flag) { + if (gst_bit_reader_get_remaining (br) < 24) + goto failed; + + advanced->color_prim = gst_bit_reader_get_bits_uint8_unchecked (br, 8); + advanced->transfer_char = gst_bit_reader_get_bits_uint8_unchecked (br, 8); + advanced->matrix_coef = gst_bit_reader_get_bits_uint8_unchecked (br, 8); + } + } + READ_UINT8 (br, advanced->hrd_param_flag, 1); + if (advanced->hrd_param_flag) + return 
parse_hrd_param_flag (br, &advanced->hrd_param); + + return GST_VC1_PARSER_OK; + +failed: + GST_WARNING ("Failed to parse advanced headers"); + + return GST_VC1_PARSER_ERROR; +} + +static GstVC1ParserResult +parse_frame_header_advanced (GstBitReader * br, GstVC1FrameHdr * framehdr, + GstVC1SeqHdr * seqhdr, GstVC1BitPlanes * bitplanes, gboolean field2) +{ + GstVC1AdvancedSeqHdr *advhdr = &seqhdr->advanced; + GstVC1PicAdvanced *pic = &framehdr->pic.advanced; + GstVC1EntryPointHdr *entrypthdr = &advhdr->entrypoint; + guint8 mvmodeidx; + + GST_DEBUG ("Parsing Frame header advanced %u", advhdr->interlace); + + /* Set the conveninence fields */ + framehdr->profile = seqhdr->profile; + framehdr->dquant = entrypthdr->dquant; + + if (advhdr->interlace) { + gint8 fcm = decode012 (br); + + if (fcm < 0) + goto failed; + + pic->fcm = (guint8) fcm; + } else + pic->fcm = GST_VC1_FRAME_PROGRESSIVE; + + if (pic->fcm == GST_VC1_FIELD_INTERLACE) { + READ_UINT8 (br, pic->fptype, 3); + if (field2) { + switch (pic->fptype) { + case 0x00: + case 0x02: + framehdr->ptype = GST_VC1_PICTURE_TYPE_I; + case 0x01: + case 0x03: + framehdr->ptype = GST_VC1_PICTURE_TYPE_P; + case 0x04: + case 0x06: + framehdr->ptype = GST_VC1_PICTURE_TYPE_B; + case 0x05: + case 0x07: + framehdr->ptype = GST_VC1_PICTURE_TYPE_BI; + } + } else { + switch (pic->fptype) { + case 0x00: + case 0x01: + framehdr->ptype = GST_VC1_PICTURE_TYPE_I; + case 0x02: + case 0x03: + framehdr->ptype = GST_VC1_PICTURE_TYPE_P; + case 0x04: + case 0x05: + framehdr->ptype = GST_VC1_PICTURE_TYPE_B; + case 0x06: + case 0x07: + framehdr->ptype = GST_VC1_PICTURE_TYPE_BI; + } + } + } else + framehdr->ptype = (guint8) get_unary (br, 0, 4); + + if (advhdr->tfcntrflag) { + READ_UINT8 (br, pic->tfcntr, 8); + GST_DEBUG ("tfcntr %u", pic->tfcntr); + } + + if (advhdr->pulldown) { + if (!advhdr->interlace || advhdr->psf) { + + READ_UINT8 (br, pic->rptfrm, 2); + GST_DEBUG ("rptfrm %u", pic->rptfrm); + + } else { + + READ_UINT8 (br, pic->tff, 1); + 
READ_UINT8 (br, pic->rff, 1); + GST_DEBUG ("tff %u, rff %u", pic->tff, pic->rff); + } + } + + if (entrypthdr->panscan_flag) { + READ_UINT8 (br, pic->ps_present, 1); + + if (pic->ps_present) { + guint i, nb_pan_scan_win = calculate_nb_pan_scan_win (advhdr, pic); + + if (gst_bit_reader_get_remaining (br) < 64 * nb_pan_scan_win) + goto failed; + + for (i = 0; i < nb_pan_scan_win; i++) { + pic->ps_hoffset = gst_bit_reader_get_bits_uint32_unchecked (br, 18); + pic->ps_voffset = gst_bit_reader_get_bits_uint32_unchecked (br, 18); + pic->ps_width = gst_bit_reader_get_bits_uint16_unchecked (br, 14); + pic->ps_height = gst_bit_reader_get_bits_uint16_unchecked (br, 14); + } + } + } + + if (framehdr->ptype == GST_VC1_PICTURE_TYPE_SKIPPED) + return GST_VC1_PARSER_OK; + + READ_UINT8 (br, pic->rndctrl, 1); + + if (advhdr->interlace) { + READ_UINT8 (br, pic->uvsamp, 1); + GST_DEBUG ("uvsamp %u", pic->uvsamp); + if (pic->fcm == GST_VC1_FIELD_INTERLACE && entrypthdr->refdist_flag && + pic->fptype < 4) + decode_refdist (br, &pic->refdist); + else + pic->refdist = 0; + } + + if (advhdr->finterpflag) { + READ_UINT8 (br, framehdr->interpfrm, 1); + GST_DEBUG ("interpfrm %u", framehdr->interpfrm); + } + + if ((pic->fcm != GST_VC1_FIELD_INTERLACE && + framehdr->ptype == GST_VC1_PICTURE_TYPE_B) || + (pic->fcm == GST_VC1_FIELD_INTERLACE && (pic->fptype > 4))) { + + guint bfraction; + + if (!decode_vlc (br, &bfraction, vc1_bfraction_vlc_table, + G_N_ELEMENTS (vc1_bfraction_vlc_table))) + goto failed; + + pic->bfraction = bfraction; + GST_DEBUG ("bfraction %u", pic->bfraction); + + if (pic->bfraction == GST_VC1_BFRACTION_PTYPE_BI) { + framehdr->ptype = GST_VC1_PICTURE_TYPE_BI; + } + + } + + READ_UINT8 (br, framehdr->pqindex, 5); + if (!framehdr->pqindex) + goto failed; + + /* compute pquant */ + if (entrypthdr->quantizer == GST_VC1_QUANTIZER_IMPLICITLY) + framehdr->pquant = vc1_pquant_table[0][framehdr->pqindex]; + else + framehdr->pquant = vc1_pquant_table[1][framehdr->pqindex]; + + 
framehdr->pquantizer = 1; + if (entrypthdr->quantizer == GST_VC1_QUANTIZER_IMPLICITLY) + framehdr->pquantizer = framehdr->pqindex < 9; + if (entrypthdr->quantizer == GST_VC1_QUANTIZER_NON_UNIFORM) + framehdr->pquantizer = 0; + + if (framehdr->pqindex <= 8) + READ_UINT8 (br, framehdr->halfqp, 1); + else + framehdr->halfqp = 0; + + if (entrypthdr->quantizer == GST_VC1_QUANTIZER_EXPLICITLY) { + READ_UINT8 (br, framehdr->pquantizer, 1); + } + + if (advhdr->postprocflag) + READ_UINT8 (br, pic->postproc, 2); + + GST_DEBUG ("Parsing %u picture, pqindex %u, pquant %u pquantizer %u" + "halfqp %u", framehdr->ptype, framehdr->pqindex, framehdr->pquant, + framehdr->pquantizer, framehdr->halfqp); + + switch (framehdr->ptype) { + case GST_VC1_PICTURE_TYPE_I: + case GST_VC1_PICTURE_TYPE_BI: + if (pic->fcm == GST_VC1_FRAME_INTERLACE) { + if (!bitplane_decoding (br, bitplanes ? bitplanes->fieldtx : NULL, + seqhdr, &pic->fieldtx)) + goto failed; + } + + if (!bitplane_decoding (br, bitplanes ? bitplanes->acpred : NULL, + seqhdr, &pic->acpred)) + goto failed; + + if (entrypthdr->overlap && framehdr->pquant <= 8) { + pic->condover = decode012 (br); + + if (pic->condover == (guint8) - 1) + goto failed; + + else if (pic->condover == GST_VC1_CONDOVER_SELECT) { + if (!bitplane_decoding (br, bitplanes ? 
bitplanes->overflags : NULL, + seqhdr, &pic->overflags)) + goto failed; + + GST_DEBUG ("overflags %u", pic->overflags); + } + } + + framehdr->transacfrm = get_unary (br, 0, 2); + pic->transacfrm2 = get_unary (br, 0, 2); + READ_UINT8 (br, framehdr->transdctab, 1); + + if (framehdr->dquant) + parse_vopdquant (br, framehdr, framehdr->dquant); + + GST_DEBUG + ("acpred %u, condover %u, transacfrm %u, transacfrm2 %u, transdctab %u", + pic->acpred, pic->condover, framehdr->transacfrm, pic->transacfrm2, + framehdr->transdctab); + break; + + case GST_VC1_PICTURE_TYPE_B: + if (entrypthdr->extended_mv) + pic->mvrange = get_unary (br, 0, 3); + else + pic->mvrange = 0; + + if (pic->fcm != GST_VC1_FRAME_PROGRESSIVE) { + if (entrypthdr->extended_dmv) + pic->dmvrange = get_unary (br, 0, 3); + } + + if (pic->fcm == GST_VC1_FRAME_INTERLACE) + READ_UINT8 (br, pic->intcomp, 1); + else + READ_UINT8 (br, pic->mvmode, 1); + + if (pic->fcm == GST_VC1_FIELD_INTERLACE) { + + if (!bitplane_decoding (br, bitplanes ? bitplanes->forwardmb : NULL, + seqhdr, &pic->forwardmb)) + goto failed; + + } else { + if (!bitplane_decoding (br, bitplanes ? bitplanes->directmb : NULL, + seqhdr, &pic->directmb)) + goto failed; + + if (!bitplane_decoding (br, bitplanes ? 
bitplanes->skipmb : NULL, + seqhdr, &pic->skipmb)) + goto failed; + } + + if (pic->fcm != GST_VC1_FRAME_PROGRESSIVE) { + if (gst_bit_reader_get_remaining (br) < 7) + goto failed; + + pic->mbmodetab = gst_bit_reader_get_bits_uint8_unchecked (br, 2); + pic->imvtab = gst_bit_reader_get_bits_uint8_unchecked (br, 2); + pic->icbptab = gst_bit_reader_get_bits_uint8_unchecked (br, 3); + + if (pic->fcm == GST_VC1_FRAME_INTERLACE) + READ_UINT8 (br, pic->mvbptab2, 2); + + if (pic->fcm == GST_VC1_FRAME_INTERLACE || + (pic->fcm == GST_VC1_FIELD_INTERLACE + && pic->mvmode == GST_VC1_MVMODE_MIXED_MV)) + READ_UINT8 (br, pic->mvbptab4, 2); + + } else { + READ_UINT8 (br, pic->mvtab, 2); + READ_UINT8 (br, pic->cbptab, 2); + } + + if (framehdr->dquant) { + parse_vopdquant (br, framehdr, framehdr->dquant); + } + + if (entrypthdr->vstransform) { + READ_UINT8 (br, pic->ttmbf, 1); + + if (pic->ttmbf) { + READ_UINT8 (br, pic->ttfrm, 2); + } + } + + framehdr->transacfrm = get_unary (br, 0, 2); + READ_UINT8 (br, framehdr->transdctab, 1); + + GST_DEBUG ("transacfrm %u transdctab %u mvmode %u mvtab %u," + "cbptab %u directmb %u skipmb %u", framehdr->transacfrm, + framehdr->transdctab, pic->mvmode, pic->mvtab, pic->cbptab, + pic->directmb, pic->skipmb); + + break; + case GST_VC1_PICTURE_TYPE_P: + if (pic->fcm == GST_VC1_FIELD_INTERLACE) { + READ_UINT8 (br, pic->numref, 1); + + if (pic->numref) + READ_UINT8 (br, pic->reffield, 1); + } + + if (entrypthdr->extended_mv) + pic->mvrange = get_unary (br, 0, 3); + else + pic->mvrange = 0; + + if (pic->fcm != GST_VC1_FRAME_PROGRESSIVE) { + if (entrypthdr->extended_dmv) + pic->dmvrange = get_unary (br, 0, 3); + } + + if (pic->fcm == GST_VC1_FRAME_INTERLACE) { + READ_UINT8 (br, pic->mvswitch4, 1); + READ_UINT8 (br, pic->intcomp, 1); + + if (pic->intcomp) { + READ_UINT8 (br, pic->lumscale, 6); + READ_UINT8 (br, pic->lumshift, 6); + } + } else { + + mvmodeidx = framehdr->pquant > 12; + pic->mvmode = vc1_mvmode_table[mvmodeidx][get_unary (br, 1, 4)]; + + if 
(pic->mvmode == GST_VC1_MVMODE_INTENSITY_COMP) { + pic->mvmode2 = vc1_mvmode2_table[mvmodeidx][get_unary (br, 1, 3)]; + + if (pic->fcm == GST_VC1_FIELD_INTERLACE) + pic->intcompfield = decode012 (br); + + READ_UINT8 (br, pic->lumscale, 6); + READ_UINT8 (br, pic->lumshift, 6); + GST_DEBUG ("lumscale %u lumshift %u", pic->lumscale, pic->lumshift); + + if (pic->fcm == GST_VC1_FIELD_INTERLACE && pic->intcompfield) { + READ_UINT8 (br, pic->lumscale2, 6); + READ_UINT8 (br, pic->lumshift2, 6); + } + } + + if (pic->fcm == GST_VC1_FRAME_PROGRESSIVE) { + if (pic->mvmode == GST_VC1_MVMODE_MIXED_MV || + (pic->mvmode == GST_VC1_MVMODE_INTENSITY_COMP && + pic->mvmode2 == GST_VC1_MVMODE_MIXED_MV)) { + + if (!bitplane_decoding (br, bitplanes ? bitplanes->mvtypemb : NULL, + seqhdr, &pic->mvtypemb)) + goto failed; + + GST_DEBUG ("mvtypemb %u", pic->mvtypemb); + } + } + } + + if (pic->fcm != GST_VC1_FIELD_INTERLACE) { + if (!bitplane_decoding (br, bitplanes ? bitplanes->skipmb : NULL, + seqhdr, &pic->skipmb)) + goto failed; + } + + if (pic->fcm != GST_VC1_FRAME_PROGRESSIVE) { + if (gst_bit_reader_get_remaining (br) < 7) + goto failed; + + pic->mbmodetab = gst_bit_reader_get_bits_uint8_unchecked (br, 2); + pic->imvtab = gst_bit_reader_get_bits_uint8_unchecked (br, 2); + pic->icbptab = gst_bit_reader_get_bits_uint8_unchecked (br, 3); + + if (pic->fcm != GST_VC1_FIELD_INTERLACE) { + READ_UINT8 (br, pic->mvbptab2, 2); + + if (pic->mvswitch4) + READ_UINT8 (br, pic->mvbptab4, 2); + + } else if (pic->mvmode == GST_VC1_MVMODE_MIXED_MV) + READ_UINT8 (br, pic->mvbptab4, 2); + + } else { + if (gst_bit_reader_get_remaining (br) < 4) + goto failed; + pic->mvtab = gst_bit_reader_get_bits_uint8_unchecked (br, 2); + pic->cbptab = gst_bit_reader_get_bits_uint8_unchecked (br, 2); + } + + if (framehdr->dquant) { + parse_vopdquant (br, framehdr, framehdr->dquant); + } + + if (entrypthdr->vstransform) { + READ_UINT8 (br, pic->ttmbf, 1); + + if (pic->ttmbf) { + READ_UINT8 (br, pic->ttfrm, 2); + } + } + + 
framehdr->transacfrm = get_unary (br, 0, 2); + READ_UINT8 (br, framehdr->transdctab, 1); + + GST_DEBUG ("transacfrm %u transdctab %u mvmode %u mvtab %u," + "cbptab %u skipmb %u", framehdr->transacfrm, framehdr->transdctab, + pic->mvmode, pic->mvtab, pic->cbptab, pic->skipmb); + + break; + + default: + goto failed; + break; + } + + return GST_VC1_PARSER_OK; + +failed: + GST_WARNING ("Failed to parse frame header"); + + return GST_VC1_PARSER_ERROR; +} + +static GstVC1ParserResult +parse_frame_header (GstBitReader * br, GstVC1FrameHdr * framehdr, + GstVC1SeqHdr * seqhdr, GstVC1BitPlanes * bitplanes) +{ + guint8 mvmodeidx, tmp; + GstVC1PicSimpleMain *pic = &framehdr->pic.simple; + GstVC1SeqStructC *structc = &seqhdr->struct_c; + + GST_DEBUG ("Parsing frame header in simple or main mode"); + + /* Set the conveninence fields */ + framehdr->profile = seqhdr->profile; + framehdr->dquant = structc->dquant; + + framehdr->interpfrm = 0; + if (structc->finterpflag) + READ_UINT8 (br, framehdr->interpfrm, 1); + + READ_UINT8 (br, pic->frmcnt, 2); + + pic->rangeredfrm = 0; + if (structc->rangered) { + READ_UINT8 (br, pic->rangeredfrm, 1); + } + + /* Figuring out the picture type */ + READ_UINT8 (br, tmp, 1); + framehdr->ptype = tmp; + + if (structc->maxbframes) { + if (!framehdr->ptype) { + READ_UINT8 (br, tmp, 1); + + if (tmp) + framehdr->ptype = GST_VC1_PICTURE_TYPE_I; + else + framehdr->ptype = GST_VC1_PICTURE_TYPE_B; + + } else + framehdr->ptype = GST_VC1_PICTURE_TYPE_P; + + } else { + if (framehdr->ptype) + framehdr->ptype = GST_VC1_PICTURE_TYPE_P; + else + framehdr->ptype = GST_VC1_PICTURE_TYPE_I; + } + + + if (framehdr->ptype == GST_VC1_PICTURE_TYPE_B) { + guint bfraction; + if (!decode_vlc (br, &bfraction, vc1_bfraction_vlc_table, + G_N_ELEMENTS (vc1_bfraction_vlc_table))) + goto failed; + + pic->bfraction = bfraction; + GST_DEBUG ("bfraction %d", pic->bfraction); + + if (pic->bfraction == GST_VC1_BFRACTION_PTYPE_BI) { + framehdr->ptype = GST_VC1_PICTURE_TYPE_BI; + } + } + 
+ if (framehdr->ptype == GST_VC1_PICTURE_TYPE_I || + framehdr->ptype == GST_VC1_PICTURE_TYPE_BI) + READ_UINT8 (br, pic->bf, 7); + + READ_UINT8 (br, framehdr->pqindex, 5); + if (!framehdr->pqindex) + return GST_VC1_PARSER_ERROR; + + GST_DEBUG ("pqindex %u", framehdr->pqindex); + + /* compute pquant */ + if (structc->quantizer == GST_VC1_QUANTIZER_IMPLICITLY) + framehdr->pquant = vc1_pquant_table[0][framehdr->pqindex]; + else + framehdr->pquant = vc1_pquant_table[1][framehdr->pqindex]; + + GST_DEBUG ("pquant %u", framehdr->pquant); + + if (framehdr->pqindex <= 8) + READ_UINT8 (br, framehdr->halfqp, 1); + else + framehdr->halfqp = 0; + + /* Set pquantizer */ + framehdr->pquantizer = 1; + if (structc->quantizer == GST_VC1_QUANTIZER_IMPLICITLY) + framehdr->pquantizer = framehdr->pqindex < 9; + else if (structc->quantizer == GST_VC1_QUANTIZER_NON_UNIFORM) + framehdr->pquantizer = 0; + + if (structc->quantizer == GST_VC1_QUANTIZER_EXPLICITLY) + READ_UINT8 (br, framehdr->pquantizer, 1); + + if (structc->extended_mv == 1) { + pic->mvrange = get_unary (br, 0, 3); + GST_DEBUG ("mvrange %u", pic->mvrange); + } + + if (structc->multires && (framehdr->ptype == GST_VC1_PICTURE_TYPE_P || + framehdr->ptype == GST_VC1_PICTURE_TYPE_I)) { + READ_UINT8 (br, pic->respic, 2); + GST_DEBUG ("Respic %u", pic->respic); + } + + GST_DEBUG ("Parsing %u Frame, pquantizer %u, halfqp %u, rangeredfrm %u, " + "interpfrm %u", framehdr->ptype, framehdr->pquantizer, framehdr->halfqp, + pic->rangeredfrm, framehdr->interpfrm); + + switch (framehdr->ptype) { + case GST_VC1_PICTURE_TYPE_I: + case GST_VC1_PICTURE_TYPE_BI: + framehdr->transacfrm = get_unary (br, 0, 2); + pic->transacfrm2 = get_unary (br, 0, 2); + READ_UINT8 (br, framehdr->transdctab, 1); + + GST_DEBUG ("transacfrm %u, transacfrm2 %u, transdctab %u", + framehdr->transacfrm, pic->transacfrm2, framehdr->transdctab); + break; + + case GST_VC1_PICTURE_TYPE_P: + mvmodeidx = framehdr->pquant > 12; + pic->mvmode = 
vc1_mvmode_table[mvmodeidx][get_unary (br, 1, 4)]; + + if (pic->mvmode == GST_VC1_MVMODE_INTENSITY_COMP) { + pic->mvmode2 = vc1_mvmode2_table[mvmodeidx][get_unary (br, 1, 3)]; + READ_UINT8 (br, pic->lumscale, 6); + READ_UINT8 (br, pic->lumshift, 6); + GST_DEBUG ("lumscale %u lumshift %u", pic->lumscale, pic->lumshift); + } + + if (pic->mvmode == GST_VC1_MVMODE_MIXED_MV || + (pic->mvmode == GST_VC1_MVMODE_INTENSITY_COMP && + pic->mvmode2 == GST_VC1_MVMODE_MIXED_MV)) { + if (!bitplane_decoding (br, bitplanes ? bitplanes->mvtypemb : NULL, + seqhdr, &pic->mvtypemb)) + goto failed; + GST_DEBUG ("mvtypemb %u", pic->mvtypemb); + } + if (!bitplane_decoding (br, bitplanes ? bitplanes->skipmb : NULL, + seqhdr, &pic->skipmb)) + goto failed; + + READ_UINT8 (br, pic->mvtab, 2); + READ_UINT8 (br, pic->cbptab, 2); + + if (framehdr->dquant) { + parse_vopdquant (br, framehdr, framehdr->dquant); + } + + if (structc->vstransform) { + READ_UINT8 (br, pic->ttmbf, 1); + GST_DEBUG ("ttmbf %u", pic->ttmbf); + + if (pic->ttmbf) { + READ_UINT8 (br, pic->ttfrm, 2); + GST_DEBUG ("ttfrm %u", pic->ttfrm); + } + } + + framehdr->transacfrm = get_unary (br, 0, 2); + READ_UINT8 (br, framehdr->transdctab, 1); + + GST_DEBUG ("transacfrm %u transdctab %u mvmode %u mvtab %u," + "cbptab %u skipmb %u", framehdr->transacfrm, framehdr->transdctab, + pic->mvmode, pic->mvtab, pic->cbptab, pic->skipmb); + break; + + case GST_VC1_PICTURE_TYPE_B: + READ_UINT8 (br, pic->mvmode, 1); + if (!bitplane_decoding (br, bitplanes ? bitplanes->directmb : NULL, + seqhdr, &pic->directmb)) + goto failed; + + if (!bitplane_decoding (br, bitplanes ? 
bitplanes->skipmb : NULL, + seqhdr, &pic->skipmb)) + goto failed; + + READ_UINT8 (br, pic->mvtab, 2); + READ_UINT8 (br, pic->cbptab, 2); + + if (framehdr->dquant) + parse_vopdquant (br, framehdr, framehdr->dquant); + + if (structc->vstransform) { + READ_UINT8 (br, pic->ttmbf, 1); + + if (pic->ttmbf) { + READ_UINT8 (br, pic->ttfrm, 2); + } + } + + framehdr->transacfrm = get_unary (br, 0, 2); + READ_UINT8 (br, framehdr->transdctab, 1); + + GST_DEBUG ("transacfrm %u transdctab %u mvmode %u mvtab %u," + "cbptab %u directmb %u skipmb %u", framehdr->transacfrm, + framehdr->transdctab, pic->mvmode, pic->mvtab, pic->cbptab, + pic->directmb, pic->skipmb); + + break; + + default: + goto failed; + break; + } + + return GST_VC1_PARSER_OK; + +failed: + GST_WARNING ("Failed to parse Simple picture header"); + + return GST_VC1_PARSER_ERROR; +} + +static GstVC1ParserResult +parse_sequence_header_struct_a (GstBitReader * br, GstVC1SeqStructA * structa) +{ + if (gst_bit_reader_get_remaining (br) < 64) { + GST_WARNING ("Failed to parse struct A"); + + return GST_VC1_PARSER_ERROR; + } + + structa->vert_size = gst_bit_reader_get_bits_uint32_unchecked (br, 32); + structa->horiz_size = gst_bit_reader_get_bits_uint32_unchecked (br, 32); + + return GST_VC1_PARSER_OK; +} + +static GstVC1ParserResult +parse_sequence_header_struct_b (GstBitReader * br, GstVC1SeqStructB * structb) +{ + if (gst_bit_reader_get_remaining (br) < 96) { + GST_WARNING ("Failed to parse sequence header"); + + return GST_VC1_PARSER_ERROR; + } + + structb->level = gst_bit_reader_get_bits_uint8_unchecked (br, 3); + structb->cbr = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + + /* res4 */ + gst_bit_reader_skip_unchecked (br, 4); + + structb->hrd_buffer = gst_bit_reader_get_bits_uint32_unchecked (br, 24); + structb->hrd_rate = gst_bit_reader_get_bits_uint32_unchecked (br, 32); + structb->framerate = gst_bit_reader_get_bits_uint32_unchecked (br, 32); + + return GST_VC1_PARSER_OK; +} + +static GstVC1ParserResult 
+parse_sequence_header_struct_c (GstBitReader * br, GstVC1SeqStructC * structc) +{ + guint8 old_interlaced_mode, tmp; + + READ_UINT8 (br, tmp, 2); + structc->profile = tmp; + + if (structc->profile == GST_VC1_PROFILE_ADVANCED) + return GST_VC1_PARSER_OK; + + GST_DEBUG ("Parsing sequence header in simple or main mode"); + + if (gst_bit_reader_get_remaining (br) < 29) + goto failed; + + /* Reserved bits */ + old_interlaced_mode = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + if (old_interlaced_mode) + GST_WARNING ("Old interlaced mode used"); + + structc->wmvp = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + if (structc->wmvp) + GST_DEBUG ("WMVP mode"); + + structc->frmrtq_postproc = gst_bit_reader_get_bits_uint8_unchecked (br, 3); + structc->bitrtq_postproc = gst_bit_reader_get_bits_uint8_unchecked (br, 5); + structc->loop_filter = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + + calculate_framerate_bitrate (structc->frmrtq_postproc, + structc->bitrtq_postproc, &structc->framerate, &structc->bitrate); + + /* Skipping reserved3 bit */ + gst_bit_reader_skip_unchecked (br, 1); + + structc->multires = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + + /* Skipping reserved4 bit */ + gst_bit_reader_skip_unchecked (br, 1); + + structc->fastuvmc = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + structc->extended_mv = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + structc->dquant = gst_bit_reader_get_bits_uint8_unchecked (br, 2); + structc->vstransform = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + + /* Skipping reserved5 bit */ + gst_bit_reader_skip_unchecked (br, 1); + + structc->overlap = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + structc->syncmarker = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + structc->rangered = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + structc->maxbframes = gst_bit_reader_get_bits_uint8_unchecked (br, 3); + structc->quantizer = gst_bit_reader_get_bits_uint8_unchecked (br, 2); + structc->finterpflag = 
gst_bit_reader_get_bits_uint8_unchecked (br, 1); + + GST_DEBUG ("frmrtq_postproc %u, bitrtq_postproc %u, loop_filter %u, " + "multires %u, fastuvmc %u, extended_mv %u, dquant %u, vstransform %u, " + "overlap %u, syncmarker %u, rangered %u, maxbframes %u, quantizer %u, " + "finterpflag %u", structc->frmrtq_postproc, structc->bitrtq_postproc, + structc->loop_filter, structc->multires, structc->fastuvmc, + structc->extended_mv, structc->dquant, structc->vstransform, + structc->overlap, structc->syncmarker, structc->rangered, + structc->maxbframes, structc->quantizer, structc->finterpflag); + + if (structc->wmvp) { + if (gst_bit_reader_get_remaining (br) < 29) + goto failed; + + structc->coded_width = gst_bit_reader_get_bits_uint16_unchecked (br, 11); + structc->coded_height = gst_bit_reader_get_bits_uint16_unchecked (br, 11); + structc->framerate = gst_bit_reader_get_bits_uint8_unchecked (br, 5); + gst_bit_reader_skip_unchecked (br, 1); + structc->slice_code = gst_bit_reader_get_bits_uint8_unchecked (br, 1); + + GST_DEBUG ("coded_width %u, coded_height %u, framerate %u slice_code %u", + structc->coded_width, structc->coded_height, structc->framerate, + structc->slice_code); + } + + return GST_VC1_PARSER_OK; + +failed: + GST_WARNING ("Failed to struct C"); + + return GST_VC1_PARSER_ERROR; +} + +/**** API ****/ +/** + * gst_vc1_identify_next_bdu: + * @data: The data to parse + * @size: the size of @data + * @bdu: (out): The #GstVC1BDU where to store parsed bdu headers + * + * Parses @data and fills @bdu fields + * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_identify_next_bdu (const guint8 * data, gsize size, GstVC1BDU * bdu) +{ + gint off1, off2; + + g_return_val_if_fail (bdu != NULL, GST_VC1_PARSER_ERROR); + + if (size < 4) { + GST_DEBUG ("Can't parse, buffer has too small size %" G_GSSIZE_FORMAT, + size); + return GST_VC1_PARSER_ERROR; + } + + off1 = scan_for_start_codes (data, size); + + if (off1 < 0) { + GST_DEBUG ("No start code prefix in 
this buffer"); + return GST_VC1_PARSER_NO_BDU; + } + + bdu->sc_offset = off1; + + bdu->offset = off1 + 4; + bdu->data = (guint8 *) data; + bdu->type = (GstVC1StartCode) (data[bdu->offset - 1]); + + if (bdu->type == GST_VC1_END_OF_SEQ) { + GST_DEBUG ("End-of-Seq BDU found"); + bdu->size = 0; + return GST_VC1_PARSER_OK; + } + + off2 = scan_for_start_codes (data + bdu->offset, size - bdu->offset); + if (off2 < 0) { + GST_DEBUG ("Bdu start %d, No end found", bdu->offset); + + return GST_VC1_PARSER_NO_BDU_END; + } + + if (off2 > 0 && data[bdu->offset + off2 - 1] == 00) + off2--; + + bdu->size = off2; + + GST_DEBUG ("Complete bdu found. Off: %d, Size: %d", bdu->offset, bdu->size); + return GST_VC1_PARSER_OK; +} + +/** + * gst_vc1_parse_sequence_layer: + * @data: The data to parse + * @size: the size of @data + * @structa: The #GstVC1SeqLayer to set. + * + * Parses @data, and fills @seqlayer fields. + * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_parse_sequence_layer (const guint8 * data, gsize size, + GstVC1SeqLayer * seqlayer) +{ + guint32 tmp; + GstBitReader br = GST_BIT_READER_INIT (data, size); + + g_return_val_if_fail (seqlayer != NULL, GST_VC1_PARSER_ERROR); + + READ_UINT32 (&br, tmp, 8); + if (tmp != 0xC5) + goto failed; + + READ_UINT32 (&br, seqlayer->numframes, 24); + + READ_UINT32 (&br, tmp, 32); + if (tmp != 0x04) + goto failed; + + if (parse_sequence_header_struct_c (&br, &seqlayer->struct_c) == + GST_VC1_PARSER_ERROR) + goto failed; + + if (parse_sequence_header_struct_a (&br, &seqlayer->struct_a) == + GST_VC1_PARSER_ERROR) + goto failed; + + READ_UINT32 (&br, tmp, 32); + if (tmp != 0x0C) + goto failed; + + if (parse_sequence_header_struct_b (&br, &seqlayer->struct_b) == + GST_VC1_PARSER_ERROR) + goto failed; + + return GST_VC1_PARSER_OK; + +failed: + GST_WARNING ("Failed to parse sequence layer"); + + return GST_VC1_PARSER_ERROR; +} + +/** + * gst_vc1_parse_sequence_header_struct_a: + * @data: The data to parse + * @size: the size 
of @data + * @structa: The #GstVC1SeqStructA to set. + * + * Parses @data, and fills @structa fields. + * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_parse_sequence_header_struct_a (const guint8 * data, + gsize size, GstVC1SeqStructA * structa) +{ + GstBitReader br = GST_BIT_READER_INIT (data, size); + + g_return_val_if_fail (structa != NULL, GST_VC1_PARSER_ERROR); + + return parse_sequence_header_struct_a (&br, structa); +} + +/** + * gst_vc1_parse_sequence_header_struct_b: + * @data: The data to parse + * @size: the size of @data + * @structa: The #GstVC1SeqStructB to set. + * + * Parses @data, and fills @structb fields. + * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_parse_sequence_header_struct_b (const guint8 * data, + gsize size, GstVC1SeqStructB * structb) +{ + GstBitReader br = GST_BIT_READER_INIT (data, size); + + g_return_val_if_fail (structb != NULL, GST_VC1_PARSER_ERROR); + + return parse_sequence_header_struct_b (&br, structb); +} + +/** + * gst_vc1_parse_sequence_header_struct_c: + * @data: The data to parse + * @size: the size of @data + * @structc: The #GstVC1SeqStructC to set. + * + * Parses @data, and fills @structc fields. + * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_parse_sequence_header_struct_c (const guint8 * data, gsize size, + GstVC1SeqStructC * structc) +{ + GstBitReader br = GST_BIT_READER_INIT (data, size); + + g_return_val_if_fail (structc != NULL, GST_VC1_PARSER_ERROR); + + return parse_sequence_header_struct_c (&br, structc); +} + +/** +* gst_vc1_parse_sequence_header: +* @data: The data to parse +* @size: the size of @data +* @seqhdr: The #GstVC1SeqHdr to set. + * + * Parses @data, and fills @seqhdr fields. 
+ * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_parse_sequence_header (const guint8 * data, gsize size, + GstVC1SeqHdr * seqhdr) +{ + GstBitReader br = GST_BIT_READER_INIT (data, size); + + g_return_val_if_fail (seqhdr != NULL, GST_VC1_PARSER_ERROR); + + if (parse_sequence_header_struct_c (&br, &seqhdr->struct_c) == + GST_VC1_PARSER_ERROR) + goto failed; + + /* Convenience field */ + seqhdr->profile = seqhdr->struct_c.profile; + + if (seqhdr->profile == GST_VC1_PROFILE_ADVANCED) + return parse_sequence_header_advanced (seqhdr, &br); + + /* Compute MB height and width */ + calculate_mb_size (seqhdr, seqhdr->struct_c.coded_width, + seqhdr->struct_c.coded_height); + + return GST_VC1_PARSER_OK; + +failed: + GST_WARNING ("Failed to parse sequence header"); + + return GST_VC1_PARSER_ERROR; +} + +/** + * gst_vc1_parse_entry_point_header: + * @data: The data to parse + * @size: the size of @data + * @entrypoint: (out): The #GstVC1EntryPointHdr to set. + * @seqhdr: The #GstVC1SeqHdr currently being parsed + * + * Parses @data, and sets @entrypoint fields. 
+ * + * Returns: a #GstVC1EntryPointHdr + */ +GstVC1ParserResult +gst_vc1_parse_entry_point_header (const guint8 * data, gsize size, + GstVC1EntryPointHdr * entrypoint, GstVC1SeqHdr * seqhdr) +{ + GstBitReader br; + guint8 i; + GstVC1AdvancedSeqHdr *advanced = &seqhdr->advanced; + + g_return_val_if_fail (entrypoint != NULL, GST_VC1_PARSER_ERROR); + + gst_bit_reader_init (&br, data, size); + + if (gst_bit_reader_get_remaining (&br) < 13) + goto failed; + + entrypoint->broken_link = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->closed_entry = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->panscan_flag = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->refdist_flag = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->loopfilter = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->fastuvmc = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->extended_mv = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->dquant = gst_bit_reader_get_bits_uint8_unchecked (&br, 2); + entrypoint->vstransform = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->overlap = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + entrypoint->quantizer = gst_bit_reader_get_bits_uint8_unchecked (&br, 2); + + if (advanced->hrd_param_flag) { + if (seqhdr->advanced.hrd_param.hrd_num_leaky_buckets > + MAX_HRD_NUM_LEAKY_BUCKETS) { + GST_WARNING + ("hrd_num_leaky_buckets (%d) > MAX_HRD_NUM_LEAKY_BUCKETS (%d)", + seqhdr->advanced.hrd_param.hrd_num_leaky_buckets, + MAX_HRD_NUM_LEAKY_BUCKETS); + goto failed; + } + for (i = 0; i < seqhdr->advanced.hrd_param.hrd_num_leaky_buckets; i++) + READ_UINT8 (&br, entrypoint->hrd_full[i], 8); + } + + READ_UINT8 (&br, entrypoint->coded_size_flag, 1); + if (entrypoint->coded_size_flag) { + READ_UINT16 (&br, entrypoint->coded_width, 12); + READ_UINT16 (&br, entrypoint->coded_height, 12); + entrypoint->coded_height = (entrypoint->coded_height + 1) << 1; + 
entrypoint->coded_width = (entrypoint->coded_width + 1) << 1; + calculate_mb_size (seqhdr, entrypoint->coded_width, + entrypoint->coded_height); + } + + if (entrypoint->extended_mv) + READ_UINT8 (&br, entrypoint->extended_dmv, 1); + + READ_UINT8 (&br, entrypoint->range_mapy_flag, 1); + if (entrypoint->range_mapy_flag) + READ_UINT8 (&br, entrypoint->range_mapy, 3); + + READ_UINT8 (&br, entrypoint->range_mapuv_flag, 1); + if (entrypoint->range_mapy_flag) + READ_UINT8 (&br, entrypoint->range_mapuv, 3); + + advanced->entrypoint = *entrypoint; + + return GST_VC1_PARSER_OK; + +failed: + GST_WARNING ("Failed to parse entry point header"); + + return GST_VC1_PARSER_ERROR; +} + +/** + * gst_vc1_parse_frame_layer: + * @data: The data to parse + * @size: the size of @data + * @framelayer: The #GstVC1FrameLayer to fill. + * + * Parses @data, and fills @framelayer fields. + * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_parse_frame_layer (const guint8 * data, gsize size, + GstVC1FrameLayer * framelayer) +{ + GstBitReader br = GST_BIT_READER_INIT (data, size); + + if (gst_bit_reader_get_remaining (&br) < 64) { + GST_WARNING ("Could not parse frame layer"); + + return GST_VC1_PARSER_ERROR; + } + + /* set default values */ + framelayer->skiped_p_frame = 0; + + framelayer->key = gst_bit_reader_get_bits_uint8_unchecked (&br, 1); + gst_bit_reader_skip_unchecked (&br, 7); + + framelayer->framesize = gst_bit_reader_get_bits_uint32_unchecked (&br, 24); + + if (framelayer->framesize == 0 || framelayer->framesize == 1) + framelayer->skiped_p_frame = 1; + + /* compute next_framelayer_offset */ + framelayer->next_framelayer_offset = framelayer->framesize + 8; + + framelayer->timestamp = gst_bit_reader_get_bits_uint32_unchecked (&br, 32); + + return GST_VC1_PARSER_OK; +} + +/** + * gst_vc1_parse_frame_header: + * @data: The data to parse + * @size: the size of @data + * @framehdr: The #GstVC1FrameHdr to fill. 
+ * @seqhdr: The #GstVC1SeqHdr currently being parsed + * @bitplanes: The #GstVC1BitPlanes to store bitplanes in or %NULL + * + * Parses @data, and fills @entrypoint fields. + * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_parse_frame_header (const guint8 * data, gsize size, + GstVC1FrameHdr * framehdr, GstVC1SeqHdr * seqhdr, + GstVC1BitPlanes * bitplanes) +{ + GstBitReader br; + GstVC1ParserResult result; + + gst_bit_reader_init (&br, data, size); + + if (seqhdr->profile == GST_VC1_PROFILE_ADVANCED) + result = parse_frame_header_advanced (&br, framehdr, seqhdr, bitplanes, + FALSE); + else + result = parse_frame_header (&br, framehdr, seqhdr, bitplanes); + + framehdr->header_size = gst_bit_reader_get_pos (&br); + return result; +} + +/** + * gst_vc1_parse_field_header: + * @data: The data to parse + * @size: the size of @data + * @fieldhdr: The #GstVC1FrameHdr to fill. + * @seqhdr: The #GstVC1SeqHdr currently being parsed + * @bitplanes: The #GstVC1BitPlanes to store bitplanes in or %NULL + * + * Parses @data, and fills @fieldhdr fields. + * + * Returns: a #GstVC1ParserResult + */ +GstVC1ParserResult +gst_vc1_parse_field_header (const guint8 * data, gsize size, + GstVC1FrameHdr * fieldhdr, GstVC1SeqHdr * seqhdr, + GstVC1BitPlanes * bitplanes) +{ + GstBitReader br; + GstVC1ParserResult result; + + gst_bit_reader_init (&br, data, size); + + result = parse_frame_header_advanced (&br, fieldhdr, seqhdr, bitplanes, TRUE); + + return result; +} + +/** + * gst_vc1_bitplanes_new: + * @seqhdr: The #GstVC1SeqHdr from which to set @bitplanes + * + * Creates a new #GstVC1BitPlanes. It should be freed with + * gst_vc1_bitplanes_free() after use. + * + * Returns: a new #GstVC1BitPlanes + */ +GstVC1BitPlanes * +gst_vc1_bitplanes_new (void) +{ + return g_slice_new0 (GstVC1BitPlanes); +} + +/** + * gst_vc1_bitplane_free: + * @bitplanes: the #GstVC1BitPlanes to free + * + * Frees @bitplanes. 
+ */ +void +gst_vc1_bitplanes_free (GstVC1BitPlanes * bitplanes) +{ + gst_vc1_bitplanes_free_1 (bitplanes); + g_slice_free (GstVC1BitPlanes, bitplanes); +} + +/** + * gst_vc1_bitplane_free_1: + * @bitplanes: The #GstVC1BitPlanes to free + * + * Frees @bitplanes fields. + */ +void +gst_vc1_bitplanes_free_1 (GstVC1BitPlanes * bitplanes) +{ + g_free (bitplanes->acpred); + g_free (bitplanes->fieldtx); + g_free (bitplanes->overflags); + g_free (bitplanes->mvtypemb); + g_free (bitplanes->skipmb); + g_free (bitplanes->directmb); + g_free (bitplanes->forwardmb); +} + +/** + * gst_vc1_bitplanes_ensure_size: + * @bitplanes: The #GstVC1BitPlanes to reset + * @seqhdr: The #GstVC1SeqHdr from which to set @bitplanes + * + * Fills the @bitplanes structure from @seqhdr, this function + * should be called after #gst_vc1_parse_sequence_header if + * in simple or main mode, or after #gst_vc1_parse_entry_point_header + * if in advanced mode. + * + * Returns: %TRUE if everything went fine, %FALSE otherwize + */ +gboolean +gst_vc1_bitplanes_ensure_size (GstVC1BitPlanes * bitplanes, + GstVC1SeqHdr * seqhdr) +{ + g_return_val_if_fail (bitplanes != NULL, FALSE); + g_return_val_if_fail (seqhdr != NULL, FALSE); + + if (bitplanes->size) { + bitplanes->size = seqhdr->mb_height * seqhdr->mb_stride; + bitplanes->acpred = + g_realloc_n (bitplanes->acpred, bitplanes->size, sizeof (guint8)); + bitplanes->fieldtx = + g_realloc_n (bitplanes->fieldtx, bitplanes->size, sizeof (guint8)); + bitplanes->overflags = + g_realloc_n (bitplanes->overflags, bitplanes->size, sizeof (guint8)); + bitplanes->mvtypemb = + g_realloc_n (bitplanes->mvtypemb, bitplanes->size, sizeof (guint8)); + bitplanes->skipmb = + g_realloc_n (bitplanes->skipmb, bitplanes->size, sizeof (guint8)); + bitplanes->directmb = + g_realloc_n (bitplanes->directmb, bitplanes->size, sizeof (guint8)); + bitplanes->forwardmb = + g_realloc_n (bitplanes->forwardmb, bitplanes->size, sizeof (guint8)); + } else { + bitplanes->size = seqhdr->mb_height * 
seqhdr->mb_stride; + bitplanes->acpred = g_malloc0 (bitplanes->size * sizeof (guint8)); + bitplanes->fieldtx = g_malloc0 (bitplanes->size * sizeof (guint8)); + bitplanes->overflags = g_malloc0 (bitplanes->size * sizeof (guint8)); + bitplanes->mvtypemb = g_malloc0 (bitplanes->size * sizeof (guint8)); + bitplanes->skipmb = g_malloc0 (bitplanes->size * sizeof (guint8)); + bitplanes->directmb = g_malloc0 (bitplanes->size * sizeof (guint8)); + bitplanes->forwardmb = g_malloc0 (bitplanes->size * sizeof (guint8)); + } + + return TRUE; +} diff --git a/gst-libs/gst/codecparsers/gstvc1parser.h b/gst-libs/gst/codecparsers/gstvc1parser.h new file mode 100644 index 0000000..ce463d7 --- /dev/null +++ b/gst-libs/gst/codecparsers/gstvc1parser.h @@ -0,0 +1,622 @@ +/* Gstreamer + * Copyright (C) <2011> Intel + * Copyright (C) <2011> Collabora Ltd. + * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef __GST_VC1_PARSER_H__ +#define __GST_VC1_PARSER_H__ + +#ifndef GST_USE_UNSTABLE_API +#warning "The VC1 parsing library is unstable API and may change in future." +#warning "You can define GST_USE_UNSTABLE_API to avoid this warning." 
+#endif + +#include <gst/gst.h> + +G_BEGIN_DECLS + +#define MAX_HRD_NUM_LEAKY_BUCKETS 31 + +/** + * @GST_VC1_BFRACTION_BASIS: The @bfraction variable should be divided + * by this constant to have the actual value. + */ +#define GST_VC1_BFRACTION_BASIS 840 + +#define GST_VC1_BFRACTION_RESERVED (GST_VC1_BFRACTION_BASIS + 1) +#define GST_VC1_BFRACTION_PTYPE_BI (GST_VC1_BFRACTION_BASIS + 2) + +typedef enum { + GST_VC1_END_OF_SEQ = 0x0A, + GST_VC1_SLICE = 0x0B, + GST_VC1_FIELD = 0x0C, + GST_VC1_FRAME = 0x0D, + GST_VC1_ENTRYPOINT = 0x0E, + GST_VC1_SEQUENCE = 0x0F, + GST_VC1_SLICE_USER = 0x1B, + GST_VC1_FIELD_USER = 0x1C, + GST_VC1_FRAME_USER = 0x1D, + GST_VC1_ENTRY_POINT_USER = 0x1E, + GST_VC1_SEQUENCE_USER = 0x1F +} GstVC1StartCode; + +typedef enum { + GST_VC1_PROFILE_SIMPLE, + GST_VC1_PROFILE_MAIN, + GST_VC1_PROFILE_RESERVED, + GST_VC1_PROFILE_ADVANCED +} GstVC1Profile; + +typedef enum { + GST_VC1_PARSER_OK, + GST_VC1_PARSER_BROKEN_DATA, + GST_VC1_PARSER_NO_BDU, + GST_VC1_PARSER_NO_BDU_END, + GST_VC1_PARSER_ERROR, +} GstVC1ParserResult; + +typedef enum +{ + GST_VC1_PICTURE_TYPE_P, + GST_VC1_PICTURE_TYPE_B, + GST_VC1_PICTURE_TYPE_I, + GST_VC1_PICTURE_TYPE_BI, + GST_VC1_PICTURE_TYPE_SKIPPED +} GstVC1PictureType; + +typedef enum +{ + GST_VC1_LEVEL_LOW = 0, /* Simple/Main profile low level */ + GST_VC1_LEVEL_MEDIUM = 1, /* Simple/Main profile medium level */ + GST_VC1_LEVEL_HIGH = 2, /* Main profile high level */ + + GST_VC1_LEVEL_L0 = 0, /* Advanced profile level 0 */ + GST_VC1_LEVEL_L1 = 1, /* Advanced profile level 1 */ + GST_VC1_LEVEL_L2 = 2, /* Advanced profile level 2 */ + GST_VC1_LEVEL_L3 = 3, /* Advanced profile level 3 */ + GST_VC1_LEVEL_L4 = 4, /* Advanced profile level 4 */ + + /* 5 to 7 reserved */ + GST_VC1_LEVEL_UNKNOWN = 255 /* Unknown profile */ +} GstVC1Level; + +typedef enum +{ + GST_VC1_QUANTIZER_IMPLICITLY, + GST_VC1_QUANTIZER_EXPLICITLY, + GST_VC1_QUANTIZER_NON_UNIFORM, + GST_VC1_QUANTIZER_UNIFORM +} GstVC1QuantizerSpec; + +typedef enum { + 
GST_VC1_DQPROFILE_FOUR_EDGES, + GST_VC1_DQPROFILE_DOUBLE_EDGES, + GST_VC1_DQPROFILE_SINGLE_EDGE, + GST_VC1_DQPROFILE_ALL_MBS +} GstVC1DQProfile; + +typedef enum { + GST_VC1_CONDOVER_NONE, + GST_VC1_CONDOVER_ALL, + GST_VC1_CONDOVER_SELECT +} GstVC1Condover; + +/** + * GstVC1MvMode: + * + */ +typedef enum +{ + GST_VC1_MVMODE_1MV_HPEL_BILINEAR, + GST_VC1_MVMODE_1MV, + GST_VC1_MVMODE_1MV_HPEL, + GST_VC1_MVMODE_MIXED_MV, + GST_VC1_MVMODE_INTENSITY_COMP +} GstVC1MvMode; + +typedef enum +{ + GST_VC1_FRAME_PROGRESSIVE = 0x0, + GST_VC1_FRAME_INTERLACE = 0x10, + GST_VC1_FIELD_INTERLACE = 0x11 +} GstVC1FrameCodingMode; + +typedef struct _GstVC1SeqHdr GstVC1SeqHdr; +typedef struct _GstVC1AdvancedSeqHdr GstVC1AdvancedSeqHdr; +typedef struct _GstVC1HrdParam GstVC1HrdParam; +typedef struct _GstVC1EntryPointHdr GstVC1EntryPointHdr; + +typedef struct _GstVC1SeqLayer GstVC1SeqLayer; + +typedef struct _GstVC1SeqStructA GstVC1SeqStructA; +typedef struct _GstVC1SeqStructB GstVC1SeqStructB; +typedef struct _GstVC1SeqStructC GstVC1SeqStructC; + +/* Pictures Structures */ +typedef struct _GstVC1FrameLayer GstVC1FrameLayer; +typedef struct _GstVC1FrameHdr GstVC1FrameHdr; +typedef struct _GstVC1PicAdvanced GstVC1PicAdvanced; +typedef struct _GstVC1PicSimpleMain GstVC1PicSimpleMain; +typedef struct _GstVC1Picture GstVC1Picture; + +typedef struct _GstVC1VopDquant GstVC1VopDquant; + +typedef struct _GstVC1BitPlanes GstVC1BitPlanes; + +typedef struct _GstVC1BDU GstVC1BDU; + +struct _GstVC1HrdParam +{ + guint8 hrd_num_leaky_buckets; + guint8 bit_rate_exponent; + guint8 buffer_size_exponent; + guint16 hrd_rate[MAX_HRD_NUM_LEAKY_BUCKETS]; + guint16 hrd_buffer[MAX_HRD_NUM_LEAKY_BUCKETS]; +}; + +/** + * GstVC1EntryPointHdr: + * + * Structure for entrypoint header, this will be used only in advanced profiles + */ +struct _GstVC1EntryPointHdr +{ + guint8 broken_link; + guint8 closed_entry; + guint8 panscan_flag; + guint8 refdist_flag; + guint8 loopfilter; + guint8 fastuvmc; + guint8 extended_mv; + 
guint8 dquant; + guint8 vstransform; + guint8 overlap; + guint8 quantizer; + guint8 coded_size_flag; + guint16 coded_width; + guint16 coded_height; + guint8 extended_dmv; + guint8 range_mapy_flag; + guint8 range_mapy; + guint8 range_mapuv_flag; + guint8 range_mapuv; + + guint8 hrd_full[MAX_HRD_NUM_LEAKY_BUCKETS]; +}; + +/** + * GstVC1AdvancedSeqHdr: + * + * Structure for the advanced profile sequence headers specific parameters. + */ +struct _GstVC1AdvancedSeqHdr +{ + GstVC1Level level; + + guint8 frmrtq_postproc; + guint8 bitrtq_postproc; + guint8 postprocflag; + guint16 max_coded_width; + guint16 max_coded_height; + guint8 pulldown; + guint8 interlace; + guint8 tfcntrflag; + guint8 finterpflag; + guint8 psf; + guint8 display_ext; + guint16 disp_horiz_size; + guint16 disp_vert_size; + guint8 aspect_ratio_flag; + guint8 aspect_ratio; + guint8 aspect_horiz_size; + guint8 aspect_vert_size; + guint8 framerate_flag; + guint8 framerateind; + guint8 frameratenr; + guint8 frameratedr; + guint16 framerateexp; + guint8 color_format_flag; + guint8 color_prim; + guint8 transfer_char; + guint8 matrix_coef; + guint8 hrd_param_flag; + guint8 colordiff_format; + + GstVC1HrdParam hrd_param; + + /* computed */ + guint framerate; /* Around in fps, 0 if unknown*/ + guint bitrate; /* Around in kpbs, 0 if unknown*/ + guint par_n; + guint par_d; + guint fps_n; + guint fps_d; + + /* The last parsed entry point */ + GstVC1EntryPointHdr entrypoint; +}; + +struct _GstVC1SeqStructA +{ + guint32 vert_size; + guint32 horiz_size; +}; + +struct _GstVC1SeqStructB +{ + GstVC1Level level; + + guint8 cbr; + guint32 framerate; + + /* In simple and main profiles only */ + guint32 hrd_buffer; + guint32 hrd_rate; +}; + +struct _GstVC1SeqStructC +{ + GstVC1Profile profile; + + /* Only in simple and main profiles */ + guint8 frmrtq_postproc; + guint8 bitrtq_postproc; + guint8 res_sprite; + guint8 loop_filter; + guint8 multires; + guint8 fastuvmc; + guint8 extended_mv; + guint8 dquant; + guint8 
vstransform; + guint8 overlap; + guint8 syncmarker; + guint8 rangered; + guint8 maxbframes; + guint8 quantizer; + guint8 finterpflag; + + /* Computed */ + guint framerate; /* Around in fps, 0 if unknown*/ + guint bitrate; /* Around in kpbs, 0 if unknown*/ + + /* This should be filled by user if previously known */ + guint16 coded_width; + /* This should be filled by user if previously known */ + guint16 coded_height; + + /* Wmvp specific */ + guint8 wmvp; /* Specify if the stream is wmp or not */ + /* In the wmvp case, the framerate is not computed but in the bistream */ + guint8 slice_code; +}; + +struct _GstVC1SeqLayer +{ + guint32 numframes; + + GstVC1SeqStructA struct_a; + GstVC1SeqStructB struct_b; + GstVC1SeqStructC struct_c; +}; + +/** + * GstVC1SeqHdr: + * + * Structure for sequence headers in any profile. + */ +struct _GstVC1SeqHdr +{ + GstVC1Profile profile; + + GstVC1SeqStructC struct_c; + + /* calculated */ + guint mb_height; + guint mb_width; + guint mb_stride; + + GstVC1AdvancedSeqHdr advanced; + +}; + +/** + * GstVC1PicSimpleMain: + * @bfaction: Should be divided by #GST_VC1_BFRACTION_BASIS + * to get the real value. + */ +struct _GstVC1PicSimpleMain +{ + guint8 frmcnt; + guint8 mvrange; + guint8 rangeredfrm; + + /* I and P pic simple and main profiles only */ + guint8 respic; + + /* I and BI pic simple and main profiles only */ + guint8 transacfrm2; + guint8 bf; + + /* B and P pic simple and main profiles only */ + guint8 mvmode; + guint8 mvtab; + guint8 ttmbf; + + /* P pic simple and main profiles only */ + guint8 mvmode2; + guint8 lumscale; + guint8 lumshift; + + guint8 cbptab; + guint8 ttfrm; + + /* B and BI picture only + * Should be divided by #GST_VC1_BFRACTION_BASIS + * to get the real value. 
*/ + guint16 bfraction; + + /* Biplane value, those fields only mention the fact + * that the bitplane is in raw mode or not */ + guint8 mvtypemb; + guint8 skipmb; + guint8 directmb; /* B pic main profile only */ +}; + +/** + * GstVC1PicAdvanced: + * @bfaction: Should be divided by #GST_VC1_BFRACTION_BASIS + * to get the real value. + */ +struct _GstVC1PicAdvanced +{ + GstVC1FrameCodingMode fcm; + guint8 tfcntr; + + guint8 rptfrm; + guint8 tff; + guint8 rff; + guint8 ps_present; + guint32 ps_hoffset; + guint32 ps_voffset; + guint16 ps_width; + guint16 ps_height; + guint8 rndctrl; + guint8 uvsamp; + guint8 postproc; + + /* B and P picture specific */ + guint8 mvrange; + guint8 mvmode; + guint8 mvtab; + guint8 cbptab; + guint8 ttmbf; + guint8 ttfrm; + + /* B and BI picture only + * Should be divided by #GST_VC1_BFRACTION_BASIS + * to get the real value. */ + guint16 bfraction; + + /* ppic */ + guint8 mvmode2; + guint8 lumscale; + guint8 lumshift; + + /* bipic */ + guint8 bf; + guint8 condover; + guint8 transacfrm2; + + /* Biplane value, those fields only mention the fact + * that the bitplane is in raw mode or not */ + guint8 acpred; + guint8 overflags; + guint8 mvtypemb; + guint8 skipmb; + guint8 directmb; + guint8 forwardmb; /* B pic interlace field only */ + + /* For interlaced pictures only */ + guint8 fieldtx; + + /* P and B pictures */ + guint8 intcomp; + guint8 dmvrange; + guint8 mbmodetab; + guint8 imvtab; + guint8 icbptab; + guint8 mvbptab2; + guint8 mvbptab4; /* If 4mvswitch in ppic */ + + /* P picture */ + guint8 mvswitch4; + + /* For interlaced fields only */ + guint16 refdist; + guint8 fptype; /* Raw value */ + + /* P pic */ + guint8 numref; + guint8 reffield; + guint8 lumscale2; + guint8 lumshift2; + guint8 intcompfield; + +}; + +struct _GstVC1BitPlanes +{ + guint8 *acpred; + guint8 *fieldtx; + guint8 *overflags; + guint8 *mvtypemb; + guint8 *skipmb; + guint8 *directmb; + guint8 *forwardmb; + + guint size; /* Size of the arrays */ +}; + +struct 
_GstVC1VopDquant +{ + guint8 pqdiff; + guint8 abspq; + + /* Computed */ + guint8 altpquant; + + /* if dqant != 2*/ + guint8 dquantfrm; + guint8 dqprofile; + + /* if dqprofile == GST_VC1_DQPROFILE_SINGLE_EDGE + * or GST_VC1_DQPROFILE_DOUBLE_EDGE:*/ + guint8 dqsbedge; + + /* if dqprofile == GST_VC1_DQPROFILE_SINGLE_EDGE + * or GST_VC1_DQPROFILE_DOUBLE_EDGE:*/ + guint8 dqbedge; + + /* if dqprofile == GST_VC1_DQPROFILE_ALL_MBS */ + guint8 dqbilevel; + +}; + +struct _GstVC1FrameLayer +{ + guint8 key; + guint32 framesize; + + guint32 timestamp; + + /* calculated */ + guint32 next_framelayer_offset; + guint8 skiped_p_frame; +}; + +/** + * GstVC1FrameHdr: + * + * Structure that represent picture in any profile or mode. + * You should look at @ptype and @profile to know what is currently + * in use. + */ +struct _GstVC1FrameHdr +{ + /* common fields */ + GstVC1PictureType ptype; + guint8 interpfrm; + guint8 halfqp; + guint8 transacfrm; + guint8 transdctab; + guint8 pqindex; + guint8 pquantizer; + + /* Computed */ + guint8 pquant; + + /* Convenience fields */ + guint8 profile; + guint8 dquant; + + /* If dquant */ + GstVC1VopDquant vopdquant; + + union { + GstVC1PicSimpleMain simple; + GstVC1PicAdvanced advanced; + } pic; + + /* Size of the picture layer in bits */ + guint header_size; +}; + +/** + * GstVC1BDU: + * + * Structure that represents a Bitstream Data Unit. 
+ */ +struct _GstVC1BDU +{ + GstVC1StartCode type; + guint size; + guint sc_offset; + guint offset; + guint8 * data; +}; + +GstVC1ParserResult gst_vc1_identify_next_bdu (const guint8 *data, + gsize size, + GstVC1BDU *bdu); + + +GstVC1ParserResult gst_vc1_parse_sequence_header (const guint8 *data, + gsize size, + GstVC1SeqHdr * seqhdr); + +GstVC1ParserResult gst_vc1_parse_entry_point_header (const guint8 *data, + gsize size, + GstVC1EntryPointHdr * entrypoint, + GstVC1SeqHdr *seqhdr); + +GstVC1ParserResult gst_vc1_parse_sequence_layer (const guint8 *data, + gsize size, + GstVC1SeqLayer * seqlayer); + +GstVC1ParserResult +gst_vc1_parse_sequence_header_struct_a (const guint8 *data, + gsize size, + GstVC1SeqStructA *structa); +GstVC1ParserResult +gst_vc1_parse_sequence_header_struct_b (const guint8 *data, + gsize size, + GstVC1SeqStructB *structb); + +GstVC1ParserResult +gst_vc1_parse_sequence_header_struct_c (const guint8 *data, + gsize size, + GstVC1SeqStructC *structc); + +GstVC1ParserResult gst_vc1_parse_frame_layer (const guint8 *data, + gsize size, + GstVC1FrameLayer * framelayer); + +GstVC1ParserResult gst_vc1_parse_frame_header (const guint8 *data, + gsize size, + GstVC1FrameHdr * framehdr, + GstVC1SeqHdr *seqhdr, + GstVC1BitPlanes *bitplanes); + +GstVC1ParserResult gst_vc1_parse_field_header (const guint8 *data, + gsize size, + GstVC1FrameHdr * fieldhdr, + GstVC1SeqHdr *seqhdr, + GstVC1BitPlanes *bitplanes); + +GstVC1BitPlanes * gst_vc1_bitplanes_new (void); + +void gst_vc1_bitplanes_free (GstVC1BitPlanes *bitplanes); + +void gst_vc1_bitplanes_free_1 (GstVC1BitPlanes *bitplanes); + +gboolean gst_vc1_bitplanes_ensure_size (GstVC1BitPlanes *bitplanes, + GstVC1SeqHdr *seqhdr); + +G_END_DECLS +#endif diff --git a/gst-libs/gst/codecparsers/parserutils.c b/gst-libs/gst/codecparsers/parserutils.c new file mode 100644 index 0000000..a31fe48 --- /dev/null +++ b/gst-libs/gst/codecparsers/parserutils.c @@ -0,0 +1,57 @@ +/* Gstreamer + * Copyright (C) <2011> Intel 
Corporation + * Copyright (C) <2011> Collabora Ltd. + * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#include "parserutils.h" + +gboolean +decode_vlc (GstBitReader * br, guint * res, const VLCTable * table, + guint length) +{ + guint8 i; + guint cbits = 0; + guint32 value = 0; + + for (i = 0; i < length; i++) { + if (cbits != table[i].cbits) { + cbits = table[i].cbits; + if (!gst_bit_reader_peek_bits_uint32 (br, &value, cbits)) { + goto failed; + } + } + + if (value == table[i].cword) { + SKIP (br, cbits); + if (res) + *res = table[i].value; + + return TRUE; + } + } + + GST_DEBUG ("Did not find code"); + +failed: + { + GST_WARNING ("Could not decode VLC returning"); + + return FALSE; + } +} diff --git a/gst-libs/gst/codecparsers/parserutils.h b/gst-libs/gst/codecparsers/parserutils.h new file mode 100644 index 0000000..009b250 --- /dev/null +++ b/gst-libs/gst/codecparsers/parserutils.h @@ -0,0 +1,108 @@ +/* Gstreamer + * Copyright (C) <2011> Intel + * Copyright (C) <2011> Collabora Ltd. 
+ * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef __PARSER_UTILS__ +#define __PARSER_UTILS__ + +#include <gst/gst.h> +#include <gst/base/gstbitreader.h> + +/* Parsing utils */ +#define GET_BITS(b, num, bits) G_STMT_START { \ + if (!gst_bit_reader_get_bits_uint32(b, bits, num)) \ + goto failed; \ + GST_TRACE ("parsed %d bits: %d", num, *(bits)); \ +} G_STMT_END + +#define CHECK_ALLOWED(val, min, max) G_STMT_START { \ + if (val < min || val > max) { \ + GST_WARNING ("value not in allowed range. 
value: %d, range %d-%d", \ + val, min, max); \ + goto failed; \ + } \ +} G_STMT_END + +#define READ_UINT8(reader, val, nbits) G_STMT_START { \ + if (!gst_bit_reader_get_bits_uint8 (reader, &val, nbits)) { \ + GST_WARNING ("failed to read uint8, nbits: %d", nbits); \ + goto failed; \ + } \ +} G_STMT_END + +#define READ_UINT16(reader, val, nbits) G_STMT_START { \ + if (!gst_bit_reader_get_bits_uint16 (reader, &val, nbits)) { \ + GST_WARNING ("failed to read uint16, nbits: %d", nbits); \ + goto failed; \ + } \ +} G_STMT_END + +#define READ_UINT32(reader, val, nbits) G_STMT_START { \ + if (!gst_bit_reader_get_bits_uint32 (reader, &val, nbits)) { \ + GST_WARNING ("failed to read uint32, nbits: %d", nbits); \ + goto failed; \ + } \ +} G_STMT_END + +#define READ_UINT64(reader, val, nbits) G_STMT_START { \ + if (!gst_bit_reader_get_bits_uint64 (reader, &val, nbits)) { \ + GST_WARNING ("failed to read uint64, nbits: %d", nbits); \ + goto failed; \ + } \ +} G_STMT_END + + +#define U_READ_UINT8(reader, val, nbits) G_STMT_START { \ + val = gst_bit_reader_get_bits_uint8_unchecked (reader, nbits); \ +} G_STMT_END + +#define U_READ_UINT16(reader, val, nbits) G_STMT_START { \ + val = gst_bit_reader_get_bits_uint16_unchecked (reader, nbits); \ +} G_STMT_END + +#define U_READ_UINT32(reader, val, nbits) G_STMT_START { \ + val = gst_bit_reader_get_bits_uint32_unchecked (reader, nbits); \ +} G_STMT_END + +#define U_READ_UINT64(reader, val, nbits) G_STMT_START { \ + val = gst_bit_reader_get_bits_uint64_unchecked (reader, nbits); \ +} G_STMT_END + +#define SKIP(reader, nbits) G_STMT_START { \ + if (!gst_bit_reader_skip (reader, nbits)) { \ + GST_WARNING ("failed to skip nbits: %d", nbits); \ + goto failed; \ + } \ +} G_STMT_END + +typedef struct _VLCTable VLCTable; + +struct _VLCTable +{ + guint value; + guint cword; + guint cbits; +}; + +gboolean +decode_vlc (GstBitReader * br, guint * res, const VLCTable * table, + guint length); + +#endif /* __PARSER_UTILS__ */ diff --git 
a/gst-libs/gst/video/Makefile.am b/gst-libs/gst/video/Makefile.am index a31276b..e1b7efb 100644 --- a/gst-libs/gst/video/Makefile.am +++ b/gst-libs/gst/video/Makefile.am @@ -13,7 +13,8 @@ libgstbasevideo_@GST_MAJORMINOR@includedir = $(includedir)/gstreamer-@GST_MAJORM libgstbasevideo_@GST_MAJORMINOR@include_HEADERS = \ gstbasevideocodec.h \ gstbasevideodecoder.h \ - gstbasevideoencoder.h + gstbasevideoencoder.h \ + gstbasevideoutils.h libgstbasevideo_@GST_MAJORMINOR@_la_CFLAGS = \ $(GST_PLUGINS_BAD_CFLAGS) \ diff --git a/gst-libs/gst/video/gstbasevideocodec.c b/gst-libs/gst/video/gstbasevideocodec.c index 1b7d784..68e203d 100644 --- a/gst-libs/gst/video/gstbasevideocodec.c +++ b/gst-libs/gst/video/gstbasevideocodec.c @@ -1,4 +1,4 @@ -/* Schrodinger +/* GStreamer * Copyright (C) 2006 David Schleef <ds@schleef.org> * * This library is free software; you can redistribute it and/or @@ -42,15 +42,8 @@ enum static void gst_base_video_codec_finalize (GObject * object); -//static const GstQueryType *gst_base_video_codec_get_query_types (GstPad *pad); -//static gboolean gst_base_video_codec_src_query (GstPad *pad, GstQuery *query); -//static gboolean gst_base_video_codec_sink_query (GstPad *pad, GstQuery *query); -//static gboolean gst_base_video_codec_src_event (GstPad *pad, GstEvent *event); -//static gboolean gst_base_video_codec_sink_event (GstPad *pad, GstEvent *event); static GstStateChangeReturn gst_base_video_codec_change_state (GstElement * element, GstStateChange transition); -//static GstFlowReturn gst_base_video_codec_push_all (GstBaseVideoCodec *base_video_codec, -// gboolean at_eos); GST_BOILERPLATE (GstBaseVideoCodec, gst_base_video_codec, GstElement, @@ -84,15 +77,13 @@ gst_base_video_codec_init (GstBaseVideoCodec * base_video_codec, { GstPadTemplate *pad_template; - GST_DEBUG ("gst_base_video_codec_init"); + GST_DEBUG_OBJECT (base_video_codec, "gst_base_video_codec_init"); pad_template = gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), 
"sink"); g_return_if_fail (pad_template != NULL); base_video_codec->sinkpad = gst_pad_new_from_template (pad_template, "sink"); - //gst_pad_set_query_function (base_video_codec->sinkpad, - // gst_base_video_codec_sink_query); gst_element_add_pad (GST_ELEMENT (base_video_codec), base_video_codec->sinkpad); @@ -106,6 +97,7 @@ gst_base_video_codec_init (GstBaseVideoCodec * base_video_codec, gst_segment_init (&base_video_codec->segment, GST_FORMAT_TIME); + g_static_rec_mutex_init (&base_video_codec->stream_lock); } static void @@ -113,376 +105,32 @@ gst_base_video_codec_reset (GstBaseVideoCodec * base_video_codec) { GList *g; - GST_DEBUG ("reset"); + GST_DEBUG_OBJECT (base_video_codec, "reset"); + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_codec); for (g = base_video_codec->frames; g; g = g_list_next (g)) { gst_base_video_codec_free_frame ((GstVideoFrame *) g->data); } g_list_free (base_video_codec->frames); + base_video_codec->frames = NULL; - if (base_video_codec->caps) { - gst_caps_unref (base_video_codec->caps); - base_video_codec->caps = NULL; - } + base_video_codec->bytes = 0; + base_video_codec->time = 0; + gst_buffer_replace (&base_video_codec->state.codec_data, NULL); + gst_caps_replace (&base_video_codec->state.caps, NULL); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_codec); } static void gst_base_video_codec_finalize (GObject * object) { - G_OBJECT_CLASS (parent_class)->finalize (object); -} - -#ifdef unused -static const GstQueryType * -gst_base_video_codec_get_query_types (GstPad * pad) -{ - static const GstQueryType query_types[] = { - GST_QUERY_POSITION, - GST_QUERY_DURATION, - GST_QUERY_CONVERT, - 0 - }; - - return query_types; -} -#endif - -#if 0 -static gboolean -gst_base_video_codec_src_convert (GstPad * pad, - GstFormat src_format, gint64 src_value, - GstFormat * dest_format, gint64 * dest_value) -{ - gboolean res; - GstBaseVideoCodec *dec; - - if (src_format == *dest_format) { - *dest_value = src_value; - return TRUE; - } - - dec = 
GST_BASE_VIDEO_CODEC (gst_pad_get_parent (pad)); - - if (src_format == GST_FORMAT_DEFAULT && *dest_format == GST_FORMAT_TIME) { - if (dec->fps_d != 0) { - *dest_value = gst_util_uint64_scale (granulepos_to_frame (src_value), - dec->fps_d * GST_SECOND, dec->fps_n); - res = TRUE; - } else { - res = FALSE; - } - } else { - GST_WARNING ("unhandled conversion from %d to %d", src_format, - *dest_format); - res = FALSE; - } - - gst_object_unref (dec); - - return res; -} - -static gboolean -gst_base_video_codec_sink_convert (GstPad * pad, - GstFormat src_format, gint64 src_value, - GstFormat * dest_format, gint64 * dest_value) -{ - gboolean res = TRUE; - GstBaseVideoCodec *dec; - - if (src_format == *dest_format) { - *dest_value = src_value; - return TRUE; - } - - dec = GST_BASE_VIDEO_CODEC (gst_pad_get_parent (pad)); - - /* FIXME: check if we are in a decoding state */ - - switch (src_format) { - case GST_FORMAT_DEFAULT: - switch (*dest_format) { - case GST_FORMAT_TIME: - *dest_value = gst_util_uint64_scale (src_value, - dec->fps_d * GST_SECOND, dec->fps_n); - break; - default: - res = FALSE; - } - break; - case GST_FORMAT_TIME: - switch (*dest_format) { - case GST_FORMAT_DEFAULT: - { - *dest_value = gst_util_uint64_scale (src_value, - dec->fps_n, dec->fps_d * GST_SECOND); - break; - } - default: - res = FALSE; - break; - } - break; - default: - res = FALSE; - break; - } - - gst_object_unref (dec); - - return res; -} -#endif - -#ifdef unused -static gboolean -gst_base_video_codec_src_query (GstPad * pad, GstQuery * query) -{ - GstBaseVideoCodec *base_codec; - gboolean res = FALSE; - - base_codec = GST_BASE_VIDEO_CODEC (gst_pad_get_parent (pad)); - - switch (GST_QUERY_TYPE (query)) { - case GST_QUERY_POSITION: - { - GstFormat format; - gint64 time; - gint64 value; - - gst_query_parse_position (query, &format, NULL); - - time = gst_util_uint64_scale (base_codec->system_frame_number, - base_codec->state.fps_n, base_codec->state.fps_d); - time += 
base_codec->state.segment.time; - GST_DEBUG ("query position %" GST_TIME_FORMAT, GST_TIME_ARGS (time)); - res = gst_base_video_encoded_video_convert (&base_codec->state, - GST_FORMAT_TIME, time, &format, &value); - if (!res) - goto error; - - gst_query_set_position (query, format, value); - break; - } - case GST_QUERY_DURATION: - res = gst_pad_query (GST_PAD_PEER (base_codec->sinkpad), query); - if (!res) - goto error; - break; - case GST_QUERY_CONVERT: - { - GstFormat src_fmt, dest_fmt; - gint64 src_val, dest_val; - - GST_DEBUG ("query convert"); - - gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - res = gst_base_video_encoded_video_convert (&base_codec->state, - src_fmt, src_val, &dest_fmt, &dest_val); - if (!res) - goto error; - gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); - break; - } - default: - res = gst_pad_query_default (pad, query); - break; - } -done: - gst_object_unref (base_codec); - - return res; -error: - GST_DEBUG_OBJECT (base_codec, "query failed"); - goto done; -} -#endif - -#ifdef unused -static gboolean -gst_base_video_codec_sink_query (GstPad * pad, GstQuery * query) -{ - GstBaseVideoCodec *base_video_codec; - gboolean res = FALSE; - - base_video_codec = GST_BASE_VIDEO_CODEC (gst_pad_get_parent (pad)); - - switch (GST_QUERY_TYPE (query)) { - case GST_QUERY_CONVERT: - { - GstFormat src_fmt, dest_fmt; - gint64 src_val, dest_val; - - gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - res = gst_base_video_encoded_video_convert (&base_video_codec->state, - src_fmt, src_val, &dest_fmt, &dest_val); - if (!res) - goto error; - gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); - break; - } - default: - res = gst_pad_query_default (pad, query); - break; - } -done: - gst_object_unref (base_video_codec); - - return res; -error: - GST_DEBUG_OBJECT (base_video_codec, "query failed"); - goto done; -} -#endif + GstBaseVideoCodec *base_video_codec = 
GST_BASE_VIDEO_CODEC (object); -#ifdef unused -static gboolean -gst_base_video_codec_src_event (GstPad * pad, GstEvent * event) -{ - GstBaseVideoCodec *base_video_codec; - gboolean res = FALSE; - - base_video_codec = GST_BASE_VIDEO_CODEC (gst_pad_get_parent (pad)); - - switch (GST_EVENT_TYPE (event)) { - case GST_EVENT_SEEK: - { - GstFormat format, tformat; - gdouble rate; - GstEvent *real_seek; - GstSeekFlags flags; - GstSeekType cur_type, stop_type; - gint64 cur, stop; - gint64 tcur, tstop; - - gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, - &cur, &stop_type, &stop); - gst_event_unref (event); - - tformat = GST_FORMAT_TIME; - res = gst_base_video_encoded_video_convert (&base_video_codec->state, - format, cur, &tformat, &tcur); - if (!res) - goto convert_error; - res = gst_base_video_encoded_video_convert (&base_video_codec->state, - format, stop, &tformat, &tstop); - if (!res) - goto convert_error; - - real_seek = gst_event_new_seek (rate, GST_FORMAT_TIME, - flags, cur_type, tcur, stop_type, tstop); - - res = gst_pad_push_event (base_video_codec->sinkpad, real_seek); + g_static_rec_mutex_free (&base_video_codec->stream_lock); - break; - } -#if 0 - case GST_EVENT_QOS: - { - gdouble proportion; - GstClockTimeDiff diff; - GstClockTime timestamp; - - gst_event_parse_qos (event, &proportion, &diff, ×tamp); - - GST_OBJECT_LOCK (base_video_codec); - base_video_codec->proportion = proportion; - base_video_codec->earliest_time = timestamp + diff; - GST_OBJECT_UNLOCK (base_video_codec); - - GST_DEBUG_OBJECT (base_video_codec, - "got QoS %" GST_TIME_FORMAT ", %" G_GINT64_FORMAT, - GST_TIME_ARGS (timestamp), diff); - - res = gst_pad_push_event (base_video_codec->sinkpad, event); - break; - } -#endif - default: - res = gst_pad_push_event (base_video_codec->sinkpad, event); - break; - } -done: - gst_object_unref (base_video_codec); - return res; - -convert_error: - GST_DEBUG_OBJECT (base_video_codec, "could not convert format"); - goto done; -} -#endif - 
-#ifdef unused -static gboolean -gst_base_video_codec_sink_event (GstPad * pad, GstEvent * event) -{ - GstBaseVideoCodec *base_video_codec; - gboolean ret = FALSE; - - base_video_codec = GST_BASE_VIDEO_CODEC (gst_pad_get_parent (pad)); - - switch (GST_EVENT_TYPE (event)) { - case GST_EVENT_FLUSH_START: - ret = gst_pad_push_event (base_video_codec->srcpad, event); - break; - case GST_EVENT_FLUSH_STOP: - gst_base_video_codec_reset (base_video_codec); - ret = gst_pad_push_event (base_video_codec->srcpad, event); - break; - case GST_EVENT_EOS: - if (gst_base_video_codec_push_all (base_video_codec, - FALSE) == GST_FLOW_ERROR) { - gst_event_unref (event); - return FALSE; - } - - ret = gst_pad_push_event (base_video_codec->srcpad, event); - break; - case GST_EVENT_NEWSEGMENT: - { - gboolean update; - GstFormat format; - gdouble rate; - gint64 start, stop, time; - - gst_event_parse_new_segment (event, &update, &rate, &format, &start, - &stop, &time); - - if (format != GST_FORMAT_TIME) - goto newseg_wrong_format; - - if (rate <= 0.0) - goto newseg_wrong_rate; - - GST_DEBUG ("newsegment %" GST_TIME_FORMAT " %" GST_TIME_FORMAT, - GST_TIME_ARGS (start), GST_TIME_ARGS (time)); - gst_segment_set_newsegment (&base_video_codec->state.segment, update, - rate, format, start, stop, time); - - ret = gst_pad_push_event (base_video_codec->srcpad, event); - break; - } - default: - ret = gst_pad_push_event (base_video_codec->srcpad, event); - break; - } -done: - gst_object_unref (base_video_codec); - return ret; - -newseg_wrong_format: - GST_DEBUG_OBJECT (base_video_codec, "received non TIME newsegment"); - gst_event_unref (event); - goto done; - -newseg_wrong_rate: - GST_DEBUG_OBJECT (base_video_codec, "negative rates not supported"); - gst_event_unref (event); - goto done; + G_OBJECT_CLASS (parent_class)->finalize (object); } -#endif - static GstStateChangeReturn gst_base_video_codec_change_state (GstElement * element, @@ -525,10 +173,12 @@ gst_base_video_codec_new_frame 
(GstBaseVideoCodec * base_video_codec) { GstVideoFrame *frame; - frame = g_malloc0 (sizeof (GstVideoFrame)); + frame = g_slice_new0 (GstVideoFrame); + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_codec); frame->system_frame_number = base_video_codec->system_frame_number; base_video_codec->system_frame_number++; + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_codec); return frame; } @@ -536,9 +186,21 @@ gst_base_video_codec_new_frame (GstBaseVideoCodec * base_video_codec) void gst_base_video_codec_free_frame (GstVideoFrame * frame) { + g_return_if_fail (frame != NULL); + if (frame->sink_buffer) { gst_buffer_unref (frame->sink_buffer); } - g_free (frame); + if (frame->src_buffer) { + gst_buffer_unref (frame->src_buffer); + } + + g_list_foreach (frame->events, (GFunc) gst_event_unref, NULL); + g_list_free (frame->events); + + if (frame->coder_hook_destroy_notify && frame->coder_hook) + frame->coder_hook_destroy_notify (frame->coder_hook); + + g_slice_free (GstVideoFrame, frame); } diff --git a/gst-libs/gst/video/gstbasevideocodec.h b/gst-libs/gst/video/gstbasevideocodec.h index 8ef4893..6471c35 100644 --- a/gst-libs/gst/video/gstbasevideocodec.h +++ b/gst-libs/gst/video/gstbasevideocodec.h @@ -79,6 +79,9 @@ G_BEGIN_DECLS */ #define GST_BASE_VIDEO_CODEC_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS +#define GST_BASE_VIDEO_CODEC_STREAM_LOCK(codec) g_static_rec_mutex_lock (&GST_BASE_VIDEO_CODEC (codec)->stream_lock) +#define GST_BASE_VIDEO_CODEC_STREAM_UNLOCK(codec) g_static_rec_mutex_unlock (&GST_BASE_VIDEO_CODEC (codec)->stream_lock) + typedef struct _GstVideoState GstVideoState; typedef struct _GstVideoFrame GstVideoFrame; typedef struct _GstBaseVideoCodec GstBaseVideoCodec; @@ -86,6 +89,7 @@ typedef struct _GstBaseVideoCodecClass GstBaseVideoCodecClass; struct _GstVideoState { + GstCaps *caps; GstVideoFormat format; int width, height; int fps_n, fps_d; @@ -100,9 +104,6 @@ struct _GstVideoState int bytes_per_picture; - //GstSegment segment; - - int picture_number; GstBuffer 
*codec_data; }; @@ -128,9 +129,15 @@ struct _GstVideoFrame int n_fields; void *coder_hook; + GDestroyNotify coder_hook_destroy_notify; + GstClockTime deadline; gboolean force_keyframe; + + /* Events that should be pushed downstream *before* + * the next src_buffer */ + GList *events; }; struct _GstBaseVideoCodec @@ -141,16 +148,23 @@ struct _GstBaseVideoCodec GstPad *sinkpad; GstPad *srcpad; + /* protects all data processing, i.e. is locked + * in the chain function, finish_frame and when + * processing serialized events */ + GStaticRecMutex stream_lock; + guint64 system_frame_number; - GList *frames; + GList *frames; /* Protected with OBJECT_LOCK */ GstVideoState state; GstSegment segment; - GstCaps *caps; - gdouble proportion; GstClockTime earliest_time; + gboolean discont; + + gint64 bytes; + gint64 time; /* FIXME before moving to base */ void *padding[GST_PADDING_LARGE]; @@ -160,15 +174,6 @@ struct _GstBaseVideoCodecClass { GstElementClass element_class; - gboolean (*start) (GstBaseVideoCodec *codec); - gboolean (*stop) (GstBaseVideoCodec *codec); - gboolean (*reset) (GstBaseVideoCodec *codec); - GstFlowReturn (*parse_data) (GstBaseVideoCodec *codec, gboolean at_eos); - int (*scan_for_sync) (GstAdapter *adapter, gboolean at_eos, - int offset, int n); - GstFlowReturn (*shape_output) (GstBaseVideoCodec *codec, GstVideoFrame *frame); - GstCaps *(*get_caps) (GstBaseVideoCodec *codec); - /* FIXME before moving to base */ void *padding[GST_PADDING_LARGE]; }; @@ -178,17 +183,6 @@ GType gst_base_video_codec_get_type (void); GstVideoFrame * gst_base_video_codec_new_frame (GstBaseVideoCodec *base_video_codec); void gst_base_video_codec_free_frame (GstVideoFrame *frame); - -gboolean gst_base_video_rawvideo_convert (GstVideoState *state, - GstFormat src_format, gint64 src_value, - GstFormat * dest_format, gint64 *dest_value); -gboolean gst_base_video_encoded_video_convert (GstVideoState *state, - GstFormat src_format, gint64 src_value, - GstFormat * dest_format, gint64 
*dest_value); - -GstClockTime gst_video_state_get_timestamp (const GstVideoState *state, - GstSegment *segment, int frame_number); - G_END_DECLS #endif diff --git a/gst-libs/gst/video/gstbasevideodecoder.c b/gst-libs/gst/video/gstbasevideodecoder.c index cf3910f..1b01ed7 100644 --- a/gst-libs/gst/video/gstbasevideodecoder.c +++ b/gst-libs/gst/video/gstbasevideodecoder.c @@ -1,5 +1,8 @@ /* GStreamer * Copyright (C) 2008 David Schleef <ds@schleef.org> + * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>. + * Copyright (C) 2011 Nokia Corporation. All rights reserved. + * Contact: Stefan Kost <stefan.kost@nokia.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public @@ -17,11 +20,115 @@ * Boston, MA 02111-1307, USA. */ +/** + * SECTION:gstbasevideodecoder + * @short_description: Base class for video decoders + * @see_also: #GstBaseTransform + * + * This base class is for video decoders turning encoded data into raw video + * frames. + * + * GstBaseVideoDecoder and subclass should cooperate as follows. + * <orderedlist> + * <listitem> + * <itemizedlist><title>Configuration</title> + * <listitem><para> + * Initially, GstBaseVideoDecoder calls @start when the decoder element + * is activated, which allows subclass to perform any global setup. + * </para></listitem> + * <listitem><para> + * GstBaseVideoDecoder calls @set_format to inform subclass of caps + * describing input video data that it is about to receive, including + * possibly configuration data. + * While unlikely, it might be called more than once, if changing input + * parameters require reconfiguration. + * </para></listitem> + * <listitem><para> + * GstBaseVideoDecoder calls @stop at end of all processing. 
+ * </para></listitem> + * </itemizedlist> + * </listitem> + * <listitem> + * <itemizedlist> + * <title>Data processing</title> + * <listitem><para> + * Base class gathers input data, and optionally allows subclass + * to parse this into subsequently manageable chunks, typically + * corresponding to and referred to as 'frames'. + * </para></listitem> + * <listitem><para> + * Input frame is provided to subclass' @handle_frame. + * </para></listitem> + * <listitem><para> + * If codec processing results in decoded data, subclass should call + * @gst_base_video_decoder_finish_frame to have decoded data pushed + * downstream. + * </para></listitem> + * </itemizedlist> + * </listitem> + * <listitem> + * <itemizedlist><title>Shutdown phase</title> + * <listitem><para> + * GstBaseVideoDecoder class calls @stop to inform the subclass that data + * parsing will be stopped. + * </para></listitem> + * </itemizedlist> + * </listitem> + * </orderedlist> + * + * Subclass is responsible for providing pad template caps for + * source and sink pads. The pads need to be named "sink" and "src". It also + * needs to set the fixed caps on srcpad, when the format is ensured. This + * is typically when base class calls subclass' @set_format function, though + * it might be delayed until calling @gst_base_video_decoder_finish_frame. + * + * Subclass is also responsible for providing (presentation) timestamps + * (likely based on corresponding input ones). If that is not applicable + * or possible, baseclass provides limited framerate based interpolation. + * + * Similarly, the baseclass provides some limited (legacy) seeking support + * (upon explicit subclass request), as full-fledged support + * should rather be left to upstream demuxer, parser or alike. This simple + * approach caters for seeking and duration reporting using estimated input + * bitrates. 
+ * + * Baseclass provides some support for reverse playback, in particular + * in case incoming data is not packetized or upstream does not provide + * fragments on keyframe boundaries. However, subclass should then be prepared + * for the parsing and frame processing stage to occur separately (rather + * than otherwise the latter immediately following the former), + * and should ensure the parsing stage properly marks keyframes or rely on + * upstream to do so properly for incoming data. + * + * Things that subclass need to take care of: + * <itemizedlist> + * <listitem><para>Provide pad templates</para></listitem> + * <listitem><para> + * Set source pad caps when appropriate + * </para></listitem> + * <listitem><para> + * Configure some baseclass behaviour parameters. + * </para></listitem> + * <listitem><para> + * Optionally parse input data, if it is not considered packetized. + * Parse sync is obtained either by providing baseclass with a + * mask and pattern or a custom @scan_for_sync. When sync is established, + * @parse_data should invoke @gst_base_video_decoder_add_to_frame and + * @gst_base_video_decoder_have_frame as appropriate. + * </para></listitem> + * <listitem><para> + * Accept data in @handle_frame and provide decoded results to + * @gst_base_video_decoder_finish_frame. 
+ * </para></listitem> + * </itemizedlist> + */ + #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "gstbasevideodecoder.h" +#include "gstbasevideoutils.h" #include <string.h> @@ -40,18 +147,14 @@ static GstFlowReturn gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf); static gboolean gst_base_video_decoder_sink_query (GstPad * pad, GstQuery * query); -//static GstFlowReturn gst_base_video_decoder_process (GstBaseVideoDecoder *base_video_decoder); static GstStateChangeReturn gst_base_video_decoder_change_state (GstElement * element, GstStateChange transition); static const GstQueryType *gst_base_video_decoder_get_query_types (GstPad * pad); static gboolean gst_base_video_decoder_src_query (GstPad * pad, GstQuery * query); -static gboolean gst_base_video_decoder_src_convert (GstPad * pad, - GstFormat src_format, gint64 src_value, GstFormat * dest_format, - gint64 * dest_value); static void gst_base_video_decoder_reset (GstBaseVideoDecoder * - base_video_decoder); + base_video_decoder, gboolean full); static GstFlowReturn gst_base_video_decoder_have_frame_2 (GstBaseVideoDecoder * base_video_decoder); @@ -66,7 +169,8 @@ static guint64 gst_base_video_decoder_get_field_duration (GstBaseVideoDecoder * base_video_decoder, int n_fields); static GstVideoFrame *gst_base_video_decoder_new_frame (GstBaseVideoDecoder * base_video_decoder); -static void gst_base_video_decoder_free_frame (GstVideoFrame * frame); + +static void gst_base_video_decoder_clear_queues (GstBaseVideoDecoder * dec); GST_BOILERPLATE (GstBaseVideoDecoder, gst_base_video_decoder, GstBaseVideoCodec, GST_TYPE_BASE_VIDEO_CODEC); @@ -90,9 +194,8 @@ gst_base_video_decoder_class_init (GstBaseVideoDecoderClass * klass) gobject_class->finalize = gst_base_video_decoder_finalize; - gstelement_class->change_state = gst_base_video_decoder_change_state; - - parent_class = g_type_class_peek_parent (klass); + gstelement_class->change_state = + GST_DEBUG_FUNCPTR (gst_base_video_decoder_change_state); } 
static void @@ -101,74 +204,127 @@ gst_base_video_decoder_init (GstBaseVideoDecoder * base_video_decoder, { GstPad *pad; - GST_DEBUG ("gst_base_video_decoder_init"); + GST_DEBUG_OBJECT (base_video_decoder, "gst_base_video_decoder_init"); pad = GST_BASE_VIDEO_CODEC_SINK_PAD (base_video_decoder); - gst_pad_set_chain_function (pad, gst_base_video_decoder_chain); - gst_pad_set_event_function (pad, gst_base_video_decoder_sink_event); - gst_pad_set_setcaps_function (pad, gst_base_video_decoder_sink_setcaps); - gst_pad_set_query_function (pad, gst_base_video_decoder_sink_query); + gst_pad_set_chain_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_decoder_chain)); + gst_pad_set_event_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_decoder_sink_event)); + gst_pad_set_setcaps_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_decoder_sink_setcaps)); + gst_pad_set_query_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_decoder_sink_query)); pad = GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder); - gst_pad_set_event_function (pad, gst_base_video_decoder_src_event); - gst_pad_set_query_type_function (pad, gst_base_video_decoder_get_query_types); - gst_pad_set_query_function (pad, gst_base_video_decoder_src_query); + gst_pad_set_event_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_decoder_src_event)); + gst_pad_set_query_type_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_decoder_get_query_types)); + gst_pad_set_query_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_decoder_src_query)); gst_pad_use_fixed_caps (pad); base_video_decoder->input_adapter = gst_adapter_new (); base_video_decoder->output_adapter = gst_adapter_new (); - gst_base_video_decoder_reset (base_video_decoder); - - base_video_decoder->current_frame = - gst_base_video_decoder_new_frame (base_video_decoder); + gst_base_video_decoder_reset (base_video_decoder, TRUE); base_video_decoder->sink_clipping = TRUE; } static gboolean +gst_base_video_decoder_push_src_event (GstBaseVideoDecoder * decoder, + 
GstEvent * event) +{ + /* Forward non-serialized events and EOS/FLUSH_STOP immediately. + * For EOS this is required because no buffer or serialized event + * will come after EOS and nothing could trigger another + * _finish_frame() call. * + * If the subclass handles sending of EOS manually it can return + * _DROPPED from ::finish() and all other subclasses should have + * decoded/flushed all remaining data before this + * + * For FLUSH_STOP this is required because it is expected + * to be forwarded immediately and no buffers are queued anyway. + */ + if (!GST_EVENT_IS_SERIALIZED (event) + || GST_EVENT_TYPE (event) == GST_EVENT_EOS + || GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) + return gst_pad_push_event (decoder->base_video_codec.srcpad, event); + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (decoder); + decoder->current_frame_events = + g_list_prepend (decoder->current_frame_events, event); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (decoder); + + return TRUE; +} + +static gboolean gst_base_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *base_video_decoder_class; GstStructure *structure; const GValue *codec_data; - GstVideoState *state; + GstVideoState state; gboolean ret = TRUE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); - GST_DEBUG ("setcaps %" GST_PTR_FORMAT, caps); + GST_DEBUG_OBJECT (base_video_decoder, "setcaps %" GST_PTR_FORMAT, caps); - state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); - if (state->codec_data) { - gst_buffer_unref (state->codec_data); - } - memset (state, 0, sizeof (GstVideoState)); + memset (&state, 0, sizeof (state)); + + state.caps = gst_caps_ref (caps); structure = gst_caps_get_structure (caps, 0); - gst_video_format_parse_caps (caps, NULL, &state->width, &state->height); - 
gst_video_parse_caps_framerate (caps, &state->fps_n, &state->fps_d); - gst_video_parse_caps_pixel_aspect_ratio (caps, &state->par_n, &state->par_d); + gst_video_format_parse_caps (caps, NULL, &state.width, &state.height); + /* this one fails if no framerate in caps */ + if (!gst_video_parse_caps_framerate (caps, &state.fps_n, &state.fps_d)) { + state.fps_n = 0; + state.fps_d = 1; + } + /* but the p-a-r sets 1/1 instead, which is not quite informative ... */ + if (!gst_structure_has_field (structure, "pixel-aspect-ratio") || + !gst_video_parse_caps_pixel_aspect_ratio (caps, + &state.par_n, &state.par_d)) { + state.par_n = 0; + state.par_d = 1; + } - state->have_interlaced = - gst_video_format_parse_caps_interlaced (caps, &state->interlaced); + state.have_interlaced = + gst_video_format_parse_caps_interlaced (caps, &state.interlaced); codec_data = gst_structure_get_value (structure, "codec_data"); if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER) { - state->codec_data = gst_value_get_buffer (codec_data); + state.codec_data = GST_BUFFER (gst_value_dup_mini_object (codec_data)); } - if (base_video_decoder_class->start) { - ret = base_video_decoder_class->start (base_video_decoder); + if (base_video_decoder_class->set_format) { + ret = base_video_decoder_class->set_format (base_video_decoder, &state); } + if (ret) { + gst_buffer_replace (&GST_BASE_VIDEO_CODEC (base_video_decoder)-> + state.codec_data, NULL); + gst_caps_replace (&GST_BASE_VIDEO_CODEC (base_video_decoder)->state.caps, + NULL); + GST_BASE_VIDEO_CODEC (base_video_decoder)->state = state; + } else { + gst_buffer_replace (&state.codec_data, NULL); + gst_caps_replace (&state.caps, NULL); + } + + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); g_object_unref (base_video_decoder); return ret; @@ -181,7 +337,7 @@ gst_base_video_decoder_finalize (GObject * object) base_video_decoder = GST_BASE_VIDEO_DECODER (object); - gst_base_video_decoder_reset (base_video_decoder); + GST_DEBUG_OBJECT 
(object, "finalize"); if (base_video_decoder->input_adapter) { g_object_unref (base_video_decoder->input_adapter); @@ -192,11 +348,46 @@ gst_base_video_decoder_finalize (GObject * object) base_video_decoder->output_adapter = NULL; } - GST_DEBUG_OBJECT (object, "finalize"); - G_OBJECT_CLASS (parent_class)->finalize (object); } +/* hard == FLUSH, otherwise discont */ +static GstFlowReturn +gst_base_video_decoder_flush (GstBaseVideoDecoder * dec, gboolean hard) +{ + GstBaseVideoDecoderClass *klass; + GstFlowReturn ret = GST_FLOW_OK; + + klass = GST_BASE_VIDEO_DECODER_GET_CLASS (dec); + + GST_LOG_OBJECT (dec, "flush hard %d", hard); + + /* Inform subclass */ + /* FIXME ? only if hard, or tell it if hard ? */ + if (klass->reset) + klass->reset (dec); + + /* FIXME make some more distinction between hard and soft, + * but subclass may not be prepared for that */ + /* FIXME perhaps also clear pending frames ?, + * but again, subclass may still come up with one of those */ + if (!hard) { + /* TODO ? 
finish/drain some stuff */ + } else { + gst_segment_init (&GST_BASE_VIDEO_CODEC (dec)->segment, + GST_FORMAT_UNDEFINED); + gst_base_video_decoder_clear_queues (dec); + dec->error_count = 0; + g_list_foreach (dec->current_frame_events, (GFunc) gst_event_unref, NULL); + g_list_free (dec->current_frame_events); + dec->current_frame_events = NULL; + } + /* and get (re)set for the sequel */ + gst_base_video_decoder_reset (dec, FALSE); + + return ret; +} + static gboolean gst_base_video_decoder_sink_event (GstPad * pad, GstEvent * event) { @@ -208,12 +399,17 @@ gst_base_video_decoder_sink_event (GstPad * pad, GstEvent * event) base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); + GST_DEBUG_OBJECT (base_video_decoder, + "received event %d, %s", GST_EVENT_TYPE (event), + GST_EVENT_TYPE_NAME (event)); + switch (GST_EVENT_TYPE (event)) { case GST_EVENT_EOS: { - if (!base_video_decoder->packetized) { - GstFlowReturn flow_ret; + GstFlowReturn flow_ret; + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); + if (!base_video_decoder->packetized) { do { flow_ret = base_video_decoder_class->parse_data (base_video_decoder, TRUE); @@ -221,69 +417,91 @@ gst_base_video_decoder_sink_event (GstPad * pad, GstEvent * event) } if (base_video_decoder_class->finish) { - base_video_decoder_class->finish (base_video_decoder); + flow_ret = base_video_decoder_class->finish (base_video_decoder); + } else { + flow_ret = GST_FLOW_OK; } - ret = - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), - event); - } + if (flow_ret == GST_FLOW_OK) + ret = gst_base_video_decoder_push_src_event (base_video_decoder, event); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); break; + } case GST_EVENT_NEWSEGMENT: { gboolean update; - double rate; - double applied_rate; + double rate, arate; GstFormat format; gint64 start; gint64 stop; - gint64 position; + gint64 pos; GstSegment *segment = &GST_BASE_VIDEO_CODEC (base_video_decoder)->segment; + 
GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); gst_event_parse_new_segment_full (event, &update, &rate, - &applied_rate, &format, &start, &stop, &position); - - if (format != GST_FORMAT_TIME) - goto newseg_wrong_format; + &arate, &format, &start, &stop, &pos); + + if (format == GST_FORMAT_TIME) { + GST_DEBUG_OBJECT (base_video_decoder, + "received TIME NEW_SEGMENT %" GST_TIME_FORMAT + " -- %" GST_TIME_FORMAT ", pos %" GST_TIME_FORMAT + ", rate %g, applied_rate %g", + GST_TIME_ARGS (start), GST_TIME_ARGS (stop), GST_TIME_ARGS (pos), + rate, arate); + } else { + GstFormat dformat = GST_FORMAT_TIME; + + GST_DEBUG_OBJECT (base_video_decoder, + "received NEW_SEGMENT %" G_GINT64_FORMAT + " -- %" G_GINT64_FORMAT ", time %" G_GINT64_FORMAT + ", rate %g, applied_rate %g", start, stop, pos, rate, arate); + /* handle newsegment as a result from our legacy simple seeking */ + /* note that initial 0 should convert to 0 in any case */ + if (base_video_decoder->do_byte_time && + gst_pad_query_convert (GST_BASE_VIDEO_CODEC_SINK_PAD + (base_video_decoder), GST_FORMAT_BYTES, start, &dformat, + &start)) { + /* best attempt convert */ + /* as these are only estimates, stop is kept open-ended to avoid + * premature cutting */ + GST_DEBUG_OBJECT (base_video_decoder, + "converted to TIME start %" GST_TIME_FORMAT, + GST_TIME_ARGS (start)); + pos = start; + stop = GST_CLOCK_TIME_NONE; + /* replace event */ + gst_event_unref (event); + event = gst_event_new_new_segment_full (update, rate, arate, + GST_FORMAT_TIME, start, stop, pos); + } else { + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); + goto newseg_wrong_format; + } + } if (!update) { - gst_base_video_decoder_reset (base_video_decoder); + gst_base_video_decoder_flush (base_video_decoder, FALSE); } base_video_decoder->timestamp_offset = start; gst_segment_set_newsegment_full (segment, - update, rate, applied_rate, format, start, stop, position); - base_video_decoder->have_segment = TRUE; + update, rate, arate, 
format, start, stop, pos); - GST_DEBUG_OBJECT (base_video_decoder, - "new segment: format %d rate %g start %" GST_TIME_FORMAT - " stop %" GST_TIME_FORMAT - " position %" GST_TIME_FORMAT - " update %d", - format, rate, - GST_TIME_ARGS (segment->start), - GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time), update); - - ret = - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), - event); - } + ret = gst_base_video_decoder_push_src_event (base_video_decoder, event); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); break; - case GST_EVENT_FLUSH_STOP:{ - GST_OBJECT_LOCK (base_video_decoder); - GST_BASE_VIDEO_CODEC (base_video_decoder)->earliest_time = - GST_CLOCK_TIME_NONE; - GST_BASE_VIDEO_CODEC (base_video_decoder)->proportion = 0.5; - gst_segment_init (&GST_BASE_VIDEO_CODEC (base_video_decoder)->segment, - GST_FORMAT_TIME); - GST_OBJECT_UNLOCK (base_video_decoder); + } + case GST_EVENT_FLUSH_STOP: + { + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); + /* well, this is kind of worse than a DISCONT */ + gst_base_video_decoder_flush (base_video_decoder, TRUE); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); } default: /* FIXME this changes the order of events */ - ret = - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), - event); + ret = gst_base_video_decoder_push_src_event (base_video_decoder, event); break; } @@ -299,6 +517,76 @@ newseg_wrong_format: } } +/* perform upstream byte <-> time conversion (duration, seeking) + * if subclass allows and if enough data for moderately decent conversion */ +static inline gboolean +gst_base_video_decoder_do_byte (GstBaseVideoDecoder * dec) +{ + GstBaseVideoCodec *codec = GST_BASE_VIDEO_CODEC (dec); + + return dec->do_byte_time && (codec->bytes > 0) && (codec->time > GST_SECOND); +} + +static gboolean +gst_base_video_decoder_do_seek (GstBaseVideoDecoder * dec, GstEvent * event) +{ + GstBaseVideoCodec *codec = GST_BASE_VIDEO_CODEC (dec); + GstSeekFlags 
flags; + GstSeekType start_type, end_type; + GstFormat format; + gdouble rate; + gint64 start, start_time, end_time; + GstSegment seek_segment; + guint32 seqnum; + + gst_event_parse_seek (event, &rate, &format, &flags, &start_type, + &start_time, &end_type, &end_time); + + /* we'll handle plain open-ended flushing seeks with the simple approach */ + if (rate != 1.0) { + GST_DEBUG_OBJECT (dec, "unsupported seek: rate"); + return FALSE; + } + + if (start_type != GST_SEEK_TYPE_SET) { + GST_DEBUG_OBJECT (dec, "unsupported seek: start time"); + return FALSE; + } + + if (end_type != GST_SEEK_TYPE_NONE || + (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) { + GST_DEBUG_OBJECT (dec, "unsupported seek: end time"); + return FALSE; + } + + if (!(flags & GST_SEEK_FLAG_FLUSH)) { + GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing"); + return FALSE; + } + + memcpy (&seek_segment, &codec->segment, sizeof (seek_segment)); + gst_segment_set_seek (&seek_segment, rate, format, flags, start_type, + start_time, end_type, end_time, NULL); + start_time = seek_segment.last_stop; + + format = GST_FORMAT_BYTES; + if (!gst_pad_query_convert (codec->sinkpad, GST_FORMAT_TIME, start_time, + &format, &start)) { + GST_DEBUG_OBJECT (dec, "conversion failed"); + return FALSE; + } + + seqnum = gst_event_get_seqnum (event); + event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags, + GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1); + gst_event_set_seqnum (event, seqnum); + + GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %" + G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start); + + return gst_pad_push_event (codec->sinkpad, event); +} + static gboolean gst_base_video_decoder_src_event (GstPad * pad, GstEvent * event) { @@ -307,42 +595,54 @@ gst_base_video_decoder_src_event (GstPad * pad, GstEvent * event) base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); + GST_DEBUG_OBJECT (base_video_decoder, + "received event %d, %s", 
GST_EVENT_TYPE (event), + GST_EVENT_TYPE_NAME (event)); + switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEEK: { GstFormat format, tformat; gdouble rate; - GstEvent *real_seek; GstSeekFlags flags; GstSeekType cur_type, stop_type; gint64 cur, stop; - gint64 tcur = -1, tstop = -1; - - GST_DEBUG ("seek event"); - - gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, - &cur, &stop_type, &stop); - gst_event_unref (event); + gint64 tcur, tstop; + guint32 seqnum; + + gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur, + &stop_type, &stop); + seqnum = gst_event_get_seqnum (event); + + /* upstream gets a chance first */ + if ((res = + gst_pad_push_event (GST_BASE_VIDEO_CODEC_SINK_PAD + (base_video_decoder), event))) + break; + + /* if upstream fails for a time seek, maybe we can help if allowed */ + if (format == GST_FORMAT_TIME) { + if (gst_base_video_decoder_do_byte (base_video_decoder)) + res = gst_base_video_decoder_do_seek (base_video_decoder, event); + break; + } + /* ... 
though a non-time seek can be aided as well */ + /* First bring the requested format to time */ tformat = GST_FORMAT_TIME; - res = - gst_base_video_decoder_src_convert (pad, format, cur, &tformat, - &tcur); - if (!res) + if (!(res = gst_pad_query_convert (pad, format, cur, &tformat, &tcur))) goto convert_error; - res = - gst_base_video_decoder_src_convert (pad, format, stop, &tformat, - &tstop); - if (!res) + if (!(res = gst_pad_query_convert (pad, format, stop, &tformat, &tstop))) goto convert_error; - real_seek = gst_event_new_seek (rate, GST_FORMAT_TIME, + /* then seek with time on the peer */ + event = gst_event_new_seek (rate, GST_FORMAT_TIME, flags, cur_type, tcur, stop_type, tstop); + gst_event_set_seqnum (event, seqnum); res = gst_pad_push_event (GST_BASE_VIDEO_CODEC_SINK_PAD - (base_video_decoder), real_seek); - + (base_video_decoder), event); break; } case GST_EVENT_QOS: @@ -401,117 +701,6 @@ convert_error: goto done; } - -#if 0 -static gboolean -gst_base_video_decoder_sink_convert (GstPad * pad, - GstFormat src_format, gint64 src_value, - GstFormat * dest_format, gint64 * dest_value) -{ - gboolean res = TRUE; - GstBaseVideoDecoder *enc; - - if (src_format == *dest_format) { - *dest_value = src_value; - return TRUE; - } - - enc = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); - - /* FIXME: check if we are in a decoding state */ - - switch (src_format) { - case GST_FORMAT_BYTES: - switch (*dest_format) { -#if 0 - case GST_FORMAT_DEFAULT: - *dest_value = gst_util_uint64_scale_int (src_value, 1, - enc->bytes_per_picture); - break; -#endif - case GST_FORMAT_TIME: - /* seems like a rather silly conversion, implement me if you like */ - default: - res = FALSE; - } - break; - case GST_FORMAT_DEFAULT: - switch (*dest_format) { - case GST_FORMAT_TIME: - *dest_value = gst_util_uint64_scale (src_value, - GST_SECOND * enc->fps_d, enc->fps_n); - break; -#if 0 - case GST_FORMAT_BYTES: - *dest_value = gst_util_uint64_scale_int (src_value, - enc->bytes_per_picture, 
1); - break; -#endif - default: - res = FALSE; - } - break; - default: - res = FALSE; - break; - } -} -#endif - -static gboolean -gst_base_video_decoder_src_convert (GstPad * pad, - GstFormat src_format, gint64 src_value, - GstFormat * dest_format, gint64 * dest_value) -{ - gboolean res = TRUE; - GstBaseVideoDecoder *enc; - - if (src_format == *dest_format) { - *dest_value = src_value; - return TRUE; - } - - enc = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); - - /* FIXME: check if we are in a encoding state */ - - GST_DEBUG ("src convert"); - switch (src_format) { -#if 0 - case GST_FORMAT_DEFAULT: - switch (*dest_format) { - case GST_FORMAT_TIME: - *dest_value = gst_util_uint64_scale (granulepos_to_frame (src_value), - enc->fps_d * GST_SECOND, enc->fps_n); - break; - default: - res = FALSE; - } - break; - case GST_FORMAT_TIME: - switch (*dest_format) { - case GST_FORMAT_DEFAULT: - { - *dest_value = gst_util_uint64_scale (src_value, - enc->fps_n, enc->fps_d * GST_SECOND); - break; - } - default: - res = FALSE; - break; - } - break; -#endif - default: - res = FALSE; - break; - } - - gst_object_unref (enc); - - return res; -} - static const GstQueryType * gst_base_video_decoder_get_query_types (GstPad * pad) { @@ -528,39 +717,74 @@ gst_base_video_decoder_get_query_types (GstPad * pad) static gboolean gst_base_video_decoder_src_query (GstPad * pad, GstQuery * query) { - GstBaseVideoDecoder *enc; + GstBaseVideoDecoder *dec; gboolean res = TRUE; - enc = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); + dec = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); - switch GST_QUERY_TYPE - (query) { + GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query); + + switch (GST_QUERY_TYPE (query)) { case GST_QUERY_POSITION: { GstFormat format; - gint64 time; - - gst_query_parse_position (query, &format, NULL); - GST_DEBUG ("query in format %d", format); - - if (format != GST_FORMAT_TIME) { - goto error; + gint64 time, value; + + /* upstream gets a chance first */ + 
if ((res = + gst_pad_peer_query (GST_BASE_VIDEO_CODEC_SINK_PAD (dec), + query))) { + GST_LOG_OBJECT (dec, "returning peer response"); + break; } - time = enc->last_timestamp; - time = - gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC (enc)->segment, + /* we start from the last seen time */ + time = dec->last_timestamp; + /* correct for the segment values */ + time = gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC (dec)->segment, GST_FORMAT_TIME, time); - gst_query_set_position (query, format, time); + GST_LOG_OBJECT (dec, + "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time)); - res = TRUE; + /* and convert to the final format */ + gst_query_parse_position (query, &format, NULL); + if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time, + &format, &value))) + break; + + gst_query_set_position (query, format, value); + GST_LOG_OBJECT (dec, + "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value, + format); break; } case GST_QUERY_DURATION: { - res = gst_pad_peer_query (enc->base_video_codec.sinkpad, query); + GstFormat format; + + /* upstream in any case */ + if ((res = gst_pad_query_default (pad, query))) + break; + + gst_query_parse_duration (query, &format, NULL); + /* try answering TIME by converting from BYTE if subclass allows */ + if (format == GST_FORMAT_TIME && gst_base_video_decoder_do_byte (dec)) { + gint64 value; + + format = GST_FORMAT_BYTES; + if (gst_pad_query_peer_duration (GST_BASE_VIDEO_CODEC_SINK_PAD (dec), + &format, &value)) { + GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value); + format = GST_FORMAT_TIME; + if (gst_pad_query_convert (GST_BASE_VIDEO_CODEC_SINK_PAD (dec), + GST_FORMAT_BYTES, value, &format, &value)) { + gst_query_set_duration (query, GST_FORMAT_TIME, value); + res = TRUE; + } + } + } break; } case GST_QUERY_CONVERT: @@ -568,12 +792,11 @@ gst_base_video_decoder_src_query (GstPad * pad, GstQuery * query) GstFormat src_fmt, dest_fmt; gint64 src_val, dest_val; - GST_DEBUG 
("convert query"); + GST_DEBUG_OBJECT (dec, "convert query"); gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - res = - gst_base_video_decoder_src_convert (pad, src_fmt, src_val, &dest_fmt, - &dest_val); + res = gst_base_video_rawvideo_convert (&GST_BASE_VIDEO_CODEC (dec)->state, + src_fmt, src_val, &dest_fmt, &dest_val); if (!res) goto error; gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); @@ -581,13 +804,13 @@ gst_base_video_decoder_src_query (GstPad * pad, GstQuery * query) } default: res = gst_pad_query_default (pad, query); - } - gst_object_unref (enc); + } + gst_object_unref (dec); return res; error: - GST_ERROR_OBJECT (enc, "query failed"); - gst_object_unref (enc); + GST_ERROR_OBJECT (dec, "query failed"); + gst_object_unref (dec); return res; } @@ -599,19 +822,19 @@ gst_base_video_decoder_sink_query (GstPad * pad, GstQuery * query) base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); - GST_DEBUG_OBJECT (base_video_decoder, "sink query fps=%d/%d", - GST_BASE_VIDEO_CODEC (base_video_decoder)->state.fps_n, - GST_BASE_VIDEO_CODEC (base_video_decoder)->state.fps_d); + GST_LOG_OBJECT (base_video_decoder, "handling query: %" GST_PTR_FORMAT, + query); + switch (GST_QUERY_TYPE (query)) { case GST_QUERY_CONVERT: { + GstBaseVideoCodec *codec = GST_BASE_VIDEO_CODEC (base_video_decoder); GstFormat src_fmt, dest_fmt; gint64 src_val, dest_val; gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - res = - gst_base_video_rawvideo_convert (&GST_BASE_VIDEO_CODEC - (base_video_decoder)->state, src_fmt, src_val, &dest_fmt, &dest_val); + res = gst_base_video_encoded_video_convert (&codec->state, codec->bytes, + codec->time, src_fmt, src_val, &dest_fmt, &dest_val); if (!res) goto error; gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); @@ -630,25 +853,6 @@ error: goto done; } - -#if 0 -static gboolean -gst_pad_is_negotiated (GstPad * pad) -{ - GstCaps *caps; - - 
g_return_val_if_fail (pad != NULL, FALSE); - - caps = gst_pad_get_negotiated_caps (pad); - if (caps) { - gst_caps_unref (caps); - return TRUE; - } - - return FALSE; -} -#endif - typedef struct _Timestamp Timestamp; struct _Timestamp { @@ -665,7 +869,8 @@ gst_base_video_decoder_add_timestamp (GstBaseVideoDecoder * base_video_decoder, ts = g_malloc (sizeof (Timestamp)); - GST_DEBUG ("adding timestamp %" GST_TIME_FORMAT " %" GST_TIME_FORMAT, + GST_LOG_OBJECT (base_video_decoder, + "adding timestamp %" GST_TIME_FORMAT " %" GST_TIME_FORMAT, GST_TIME_ARGS (base_video_decoder->input_offset), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer))); @@ -703,120 +908,89 @@ gst_base_video_decoder_get_timestamp_at_offset (GstBaseVideoDecoder * } } - GST_DEBUG ("got timestamp %" GST_TIME_FORMAT " %" GST_TIME_FORMAT, + GST_LOG_OBJECT (base_video_decoder, + "got timestamp %" GST_TIME_FORMAT " %" GST_TIME_FORMAT, GST_TIME_ARGS (offset), GST_TIME_ARGS (*timestamp)); } static void -gst_base_video_decoder_reset (GstBaseVideoDecoder * base_video_decoder) +gst_base_video_decoder_clear_queues (GstBaseVideoDecoder * dec) { - GstBaseVideoDecoderClass *base_video_decoder_class; + g_list_foreach (dec->queued, (GFunc) gst_mini_object_unref, NULL); + g_list_free (dec->queued); + dec->queued = NULL; + g_list_foreach (dec->gather, (GFunc) gst_mini_object_unref, NULL); + g_list_free (dec->gather); + dec->gather = NULL; + g_list_foreach (dec->decode, (GFunc) gst_base_video_codec_free_frame, NULL); + g_list_free (dec->decode); + dec->decode = NULL; + g_list_foreach (dec->parse, (GFunc) gst_mini_object_unref, NULL); + g_list_free (dec->parse); + dec->parse = NULL; + g_list_foreach (dec->parse_gather, (GFunc) gst_base_video_codec_free_frame, + NULL); + g_list_free (dec->parse_gather); + dec->parse_gather = NULL; +} - base_video_decoder_class = - GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); +static void +gst_base_video_decoder_reset (GstBaseVideoDecoder * base_video_decoder, + gboolean full) +{ + 
GST_DEBUG_OBJECT (base_video_decoder, "reset full %d", full); - GST_DEBUG ("reset"); + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); - base_video_decoder->started = FALSE; + if (full) { + gst_segment_init (&GST_BASE_VIDEO_CODEC (base_video_decoder)->segment, + GST_FORMAT_UNDEFINED); + gst_base_video_decoder_clear_queues (base_video_decoder); + base_video_decoder->error_count = 0; + } - base_video_decoder->discont = TRUE; + GST_BASE_VIDEO_CODEC (base_video_decoder)->discont = TRUE; base_video_decoder->have_sync = FALSE; base_video_decoder->timestamp_offset = GST_CLOCK_TIME_NONE; - GST_BASE_VIDEO_CODEC (base_video_decoder)->system_frame_number = 0; - base_video_decoder->presentation_frame_number = 0; - base_video_decoder->base_picture_number = 0; + base_video_decoder->field_index = 0; base_video_decoder->last_timestamp = GST_CLOCK_TIME_NONE; base_video_decoder->input_offset = 0; base_video_decoder->frame_offset = 0; - - /* This function could be called from finalize() */ - if (base_video_decoder->input_adapter) { - gst_adapter_clear (base_video_decoder->input_adapter); - } - if (base_video_decoder->output_adapter) { - gst_adapter_clear (base_video_decoder->output_adapter); - } - //gst_segment_init (&base_video_decoder->segment, GST_FORMAT_TIME); + gst_adapter_clear (base_video_decoder->input_adapter); + gst_adapter_clear (base_video_decoder->output_adapter); + g_list_foreach (base_video_decoder->timestamps, (GFunc) g_free, NULL); + g_list_free (base_video_decoder->timestamps); + base_video_decoder->timestamps = NULL; if (base_video_decoder->current_frame) { - gst_base_video_decoder_free_frame (base_video_decoder->current_frame); + gst_base_video_codec_free_frame (base_video_decoder->current_frame); base_video_decoder->current_frame = NULL; } + GST_BASE_VIDEO_CODEC (base_video_decoder)->system_frame_number = 0; + base_video_decoder->base_picture_number = 0; + GST_OBJECT_LOCK (base_video_decoder); GST_BASE_VIDEO_CODEC (base_video_decoder)->earliest_time = 
GST_CLOCK_TIME_NONE; GST_BASE_VIDEO_CODEC (base_video_decoder)->proportion = 0.5; GST_OBJECT_UNLOCK (base_video_decoder); - - if (base_video_decoder_class->reset) { - base_video_decoder_class->reset (base_video_decoder); - } + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); } static GstFlowReturn -gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) +gst_base_video_decoder_chain_forward (GstBaseVideoDecoder * base_video_decoder, + GstBuffer * buf) { - GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *klass; GstFlowReturn ret; - GST_DEBUG ("chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT " size %d", - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buf)), GST_BUFFER_SIZE (buf)); - -#if 0 - /* requiring the pad to be negotiated makes it impossible to use - * oggdemux or filesrc ! decoder */ - if (!gst_pad_is_negotiated (pad)) { - GST_DEBUG ("not negotiated"); - return GST_FLOW_NOT_NEGOTIATED; - } -#endif - - base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); klass = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); - GST_DEBUG_OBJECT (base_video_decoder, "chain"); - - if (!base_video_decoder->have_segment) { - GstEvent *event; - GstFlowReturn ret; - - GST_WARNING_OBJECT (base_video_decoder, - "Received buffer without a new-segment. 
Assuming timestamps start from 0."); - - gst_segment_set_newsegment_full (&GST_BASE_VIDEO_CODEC - (base_video_decoder)->segment, FALSE, 1.0, 1.0, GST_FORMAT_TIME, 0, - GST_CLOCK_TIME_NONE, 0); - base_video_decoder->have_segment = TRUE; - - event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, - GST_CLOCK_TIME_NONE, 0); - - ret = - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), - event); - if (!ret) { -#if 0 - /* Other base classes tend to ignore the return value */ - GST_ERROR ("new segment event ret=%d", ret); - return GST_FLOW_ERROR; -#endif - } - } - - if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { - GST_DEBUG_OBJECT (base_video_decoder, "received DISCONT buffer"); - gst_base_video_decoder_reset (base_video_decoder); - } - - if (!base_video_decoder->started) { - klass->start (base_video_decoder); - base_video_decoder->started = TRUE; - } + g_return_val_if_fail (base_video_decoder->packetized || klass->parse_data, + GST_FLOW_ERROR); if (base_video_decoder->current_frame == NULL) { base_video_decoder->current_frame = @@ -828,18 +1002,12 @@ gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) } base_video_decoder->input_offset += GST_BUFFER_SIZE (buf); -#if 0 - if (base_video_decoder->timestamp_offset == GST_CLOCK_TIME_NONE && - GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) { - GST_DEBUG ("got new offset %" GST_TIME_FORMAT, - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf))); - base_video_decoder->timestamp_offset = GST_BUFFER_TIMESTAMP (buf); - } -#endif - if (base_video_decoder->packetized) { base_video_decoder->current_frame->sink_buffer = buf; + if (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) + base_video_decoder->current_frame->is_sync_point = TRUE; + ret = gst_base_video_decoder_have_frame_2 (base_video_decoder); } else { @@ -848,7 +1016,7 @@ gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) if (!base_video_decoder->have_sync) { int n, m; - GST_DEBUG ("no sync, 
scanning"); + GST_DEBUG_OBJECT (base_video_decoder, "no sync, scanning"); n = gst_adapter_available (base_video_decoder->input_adapter); if (klass->capture_mask != 0) { @@ -860,21 +1028,22 @@ gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) m = 0; } if (m == -1) { - GST_ERROR ("scan returned no sync"); + GST_ERROR_OBJECT (base_video_decoder, "scan returned no sync"); gst_adapter_flush (base_video_decoder->input_adapter, n - 3); - gst_object_unref (base_video_decoder); return GST_FLOW_OK; } else { if (m > 0) { if (m >= n) { - GST_ERROR ("subclass scanned past end %d >= %d", m, n); + GST_ERROR_OBJECT (base_video_decoder, + "subclass scanned past end %d >= %d", m, n); } gst_adapter_flush (base_video_decoder->input_adapter, m); if (m < n) { - GST_DEBUG ("found possible sync after %d bytes (of %d)", m, n); + GST_DEBUG_OBJECT (base_video_decoder, + "found possible sync after %d bytes (of %d)", m, n); /* this is only "maybe" sync */ base_video_decoder->have_sync = TRUE; @@ -889,12 +1058,241 @@ gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) } while (ret == GST_FLOW_OK); if (ret == GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA) { - gst_object_unref (base_video_decoder); return GST_FLOW_OK; } } - gst_object_unref (base_video_decoder); + return ret; +} + +static GstFlowReturn +gst_base_video_decoder_flush_decode (GstBaseVideoDecoder * dec) +{ + GstFlowReturn res = GST_FLOW_OK; + GList *walk; + + walk = dec->decode; + + GST_DEBUG_OBJECT (dec, "flushing buffers to decode"); + + /* clear buffer and decoder state */ + gst_base_video_decoder_flush (dec, FALSE); + + /* signal have_frame it should not capture frames */ + dec->process = TRUE; + + while (walk) { + GList *next; + GstVideoFrame *frame = (GstVideoFrame *) (walk->data); + GstBuffer *buf = frame->sink_buffer; + + GST_DEBUG_OBJECT (dec, "decoding frame %p, ts %" GST_TIME_FORMAT, + buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf))); + + next = g_list_next (walk); + if (dec->current_frame) + 
gst_base_video_codec_free_frame (dec->current_frame); + dec->current_frame = frame; + /* decode buffer, resulting data prepended to queue */ + res = gst_base_video_decoder_have_frame_2 (dec); + + walk = next; + } + + dec->process = FALSE; + + return res; +} + +static GstFlowReturn +gst_base_video_decoder_flush_parse (GstBaseVideoDecoder * dec) +{ + GstFlowReturn res = GST_FLOW_OK; + GList *walk; + + walk = dec->parse; + + GST_DEBUG_OBJECT (dec, "flushing buffers to parsing"); + + /* clear buffer and decoder state */ + gst_base_video_decoder_flush (dec, FALSE); + + while (walk) { + GList *next; + GstBuffer *buf = GST_BUFFER_CAST (walk->data); + + GST_DEBUG_OBJECT (dec, "parsing buffer %p, ts %" GST_TIME_FORMAT, + buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf))); + + next = g_list_next (walk); + /* parse buffer, resulting frames prepended to parse_gather queue */ + gst_buffer_ref (buf); + res = gst_base_video_decoder_chain_forward (dec, buf); + + /* if we generated output, we can discard the buffer, else we + * keep it in the queue */ + if (dec->parse_gather) { + GST_DEBUG_OBJECT (dec, "parsed buffer to %p", dec->parse_gather->data); + dec->parse = g_list_delete_link (dec->parse, walk); + gst_buffer_unref (buf); + } else { + GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping"); + } + walk = next; + } + + /* now we can process frames */ + GST_DEBUG_OBJECT (dec, "checking frames"); + while (dec->parse_gather) { + GstVideoFrame *frame; + + frame = (GstVideoFrame *) (dec->parse_gather->data); + /* remove from the gather list */ + dec->parse_gather = + g_list_delete_link (dec->parse_gather, dec->parse_gather); + /* copy to decode queue */ + dec->decode = g_list_prepend (dec->decode, frame); + + /* if we copied a keyframe, flush and decode the decode queue */ + if (frame->is_sync_point) { + GST_DEBUG_OBJECT (dec, "copied keyframe"); + res = gst_base_video_decoder_flush_decode (dec); + } + } + + /* now send queued data downstream */ + while (dec->queued) { + GstBuffer 
*buf = GST_BUFFER_CAST (dec->queued->data); + + if (G_LIKELY (res == GST_FLOW_OK)) { + GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %u, " + "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf, + GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), + GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); + /* should be already, but let's be sure */ + buf = gst_buffer_make_metadata_writable (buf); + /* avoid stray DISCONT from forward processing, + * which have no meaning in reverse pushing */ + GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT); + res = gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (dec), buf); + } else { + gst_buffer_unref (buf); + } + + dec->queued = g_list_delete_link (dec->queued, dec->queued); + } + + return res; +} + +static GstFlowReturn +gst_base_video_decoder_chain_reverse (GstBaseVideoDecoder * dec, + GstBuffer * buf) +{ + GstFlowReturn result = GST_FLOW_OK; + + /* if we have a discont, move buffers to the decode list */ + if (!buf || GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT)) { + GST_DEBUG_OBJECT (dec, "received discont"); + while (dec->gather) { + GstBuffer *gbuf; + + gbuf = GST_BUFFER_CAST (dec->gather->data); + /* remove from the gather list */ + dec->gather = g_list_delete_link (dec->gather, dec->gather); + /* copy to parse queue */ + dec->parse = g_list_prepend (dec->parse, gbuf); + } + /* parse and decode stuff in the parse queue */ + gst_base_video_decoder_flush_parse (dec); + } + + if (G_LIKELY (buf)) { + GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %u, " + "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf, + GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), + GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); + + /* add buffer to gather queue */ + dec->gather = g_list_prepend (dec->gather, buf); + } + + return result; +} + +static GstFlowReturn +gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) +{ + GstBaseVideoDecoder *base_video_decoder; + GstFlowReturn ret = GST_FLOW_OK; 
+ + base_video_decoder = GST_BASE_VIDEO_DECODER (GST_PAD_PARENT (pad)); + + GST_LOG_OBJECT (base_video_decoder, + "chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT " size %d", + GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), + GST_TIME_ARGS (GST_BUFFER_DURATION (buf)), GST_BUFFER_SIZE (buf)); + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); + + /* NOTE: + * requiring the pad to be negotiated makes it impossible to use + * oggdemux or filesrc ! decoder */ + + if (GST_BASE_VIDEO_CODEC (base_video_decoder)->segment.format == + GST_FORMAT_UNDEFINED) { + GstEvent *event; + GstFlowReturn ret; + + GST_WARNING_OBJECT (base_video_decoder, + "Received buffer without a new-segment. " + "Assuming timestamps start from 0."); + + gst_segment_set_newsegment_full (&GST_BASE_VIDEO_CODEC + (base_video_decoder)->segment, FALSE, 1.0, 1.0, GST_FORMAT_TIME, 0, + GST_CLOCK_TIME_NONE, 0); + + event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, + GST_CLOCK_TIME_NONE, 0); + + ret = gst_base_video_decoder_push_src_event (base_video_decoder, event); + if (!ret) { + GST_ERROR_OBJECT (base_video_decoder, "new segment event ret=%d", ret); + ret = GST_FLOW_ERROR; + goto done; + } + } + + if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { + gint64 ts, index; + + GST_DEBUG_OBJECT (base_video_decoder, "received DISCONT buffer"); + + /* track present position */ + ts = base_video_decoder->timestamp_offset; + index = base_video_decoder->field_index; + + gst_base_video_decoder_flush (base_video_decoder, FALSE); + + /* buffer may claim DISCONT loudly, if it can't tell us where we are now, + * we'll stick to where we were ... + * Particularly useful/needed for upstream BYTE based */ + if (GST_BASE_VIDEO_CODEC (base_video_decoder)->segment.rate > 0.0 && + !GST_BUFFER_TIMESTAMP_IS_VALID (buf)) { + GST_DEBUG_OBJECT (base_video_decoder, + "... 
but restoring previous ts tracking"); + base_video_decoder->timestamp_offset = ts; + base_video_decoder->field_index = index & ~1; + } + } + + if (GST_BASE_VIDEO_CODEC (base_video_decoder)->segment.rate > 0.0) + ret = gst_base_video_decoder_chain_forward (base_video_decoder, buf); + else + ret = gst_base_video_decoder_chain_reverse (base_video_decoder, buf); + +done: + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); return ret; } @@ -910,6 +1308,10 @@ gst_base_video_decoder_change_state (GstElement * element, base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (element); switch (transition) { + case GST_STATE_CHANGE_READY_TO_PAUSED: + if (base_video_decoder_class->start) { + base_video_decoder_class->start (base_video_decoder); + } default: break; } @@ -921,11 +1323,14 @@ gst_base_video_decoder_change_state (GstElement * element, if (base_video_decoder_class->stop) { base_video_decoder_class->stop (base_video_decoder); } - gst_segment_init (&GST_BASE_VIDEO_CODEC (base_video_decoder)->segment, - GST_FORMAT_TIME); - g_list_foreach (base_video_decoder->timestamps, (GFunc) g_free, NULL); - g_list_free (base_video_decoder->timestamps); - base_video_decoder->timestamps = NULL; + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); + gst_base_video_decoder_reset (base_video_decoder, TRUE); + g_list_foreach (base_video_decoder->current_frame_events, + (GFunc) gst_event_unref, NULL); + g_list_free (base_video_decoder->current_frame_events); + base_video_decoder->current_frame_events = NULL; + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); break; default: break; @@ -934,31 +1339,15 @@ gst_base_video_decoder_change_state (GstElement * element, return ret; } -static void -gst_base_video_decoder_free_frame (GstVideoFrame * frame) -{ - g_return_if_fail (frame != NULL); - - if (frame->sink_buffer) { - gst_buffer_unref (frame->sink_buffer); - } - if (frame->src_buffer) { - gst_buffer_unref (frame->src_buffer); - } - - g_free (frame); -} - static 
GstVideoFrame * gst_base_video_decoder_new_frame (GstBaseVideoDecoder * base_video_decoder) { GstVideoFrame *frame; - frame = g_malloc0 (sizeof (GstVideoFrame)); - - frame->system_frame_number = - GST_BASE_VIDEO_CODEC (base_video_decoder)->system_frame_number; - GST_BASE_VIDEO_CODEC (base_video_decoder)->system_frame_number++; + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); + frame = + gst_base_video_codec_new_frame (GST_BASE_VIDEO_CODEC + (base_video_decoder)); frame->decode_frame_number = frame->system_frame_number - base_video_decoder->reorder_depth; @@ -968,28 +1357,75 @@ gst_base_video_decoder_new_frame (GstBaseVideoDecoder * base_video_decoder) frame->presentation_duration = GST_CLOCK_TIME_NONE; frame->n_fields = 2; + frame->events = base_video_decoder->current_frame_events; + base_video_decoder->current_frame_events = NULL; + + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); + return frame; } +/** + * gst_base_video_decoder_finish_frame: + * @base_video_decoder: a #GstBaseVideoDecoder + * @frame: a decoded #GstVideoFrame + * + * @frame should have a valid decoded data buffer, whose metadata fields + * are then appropriately set according to frame data and pushed downstream. + * If no output data is provided, @frame is considered skipped. + * In any case, the frame is considered finished and released. 
+ * + * Returns: a #GstFlowReturn resulting from sending data downstream + */ GstFlowReturn gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, GstVideoFrame * frame) { GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; GstBuffer *src_buffer; + GstFlowReturn ret = GST_FLOW_OK; + GList *l, *events = NULL; + + GST_LOG_OBJECT (base_video_decoder, "finish frame"); + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); - GST_DEBUG ("finish frame"); - GST_DEBUG ("n %d in %d out %d", +#ifndef GST_DISABLE_GST_DEBUG + GST_LOG_OBJECT (base_video_decoder, "n %d in %d out %d", g_list_length (GST_BASE_VIDEO_CODEC (base_video_decoder)->frames), gst_adapter_available (base_video_decoder->input_adapter), gst_adapter_available (base_video_decoder->output_adapter)); +#endif - GST_DEBUG ("finish frame sync=%d pts=%" GST_TIME_FORMAT, frame->is_sync_point, + GST_LOG_OBJECT (base_video_decoder, + "finish frame sync=%d pts=%" GST_TIME_FORMAT, frame->is_sync_point, GST_TIME_ARGS (frame->presentation_timestamp)); + /* Push all pending events that arrived before this frame */ + for (l = base_video_decoder->base_video_codec.frames; l; l = l->next) { + GstVideoFrame *tmp = l->data; + + if (tmp->events) { + GList *k; + + for (k = g_list_last (tmp->events); k; k = k->prev) + events = g_list_prepend (events, k->data); + g_list_free (tmp->events); + tmp->events = NULL; + } + + if (tmp == frame) + break; + } + + for (l = g_list_last (events); l; l = l->next) + gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), + l->data); + g_list_free (events); + if (GST_CLOCK_TIME_IS_VALID (frame->presentation_timestamp)) { if (frame->presentation_timestamp != base_video_decoder->timestamp_offset) { - GST_DEBUG ("sync timestamp %" GST_TIME_FORMAT " diff %" GST_TIME_FORMAT, + GST_DEBUG_OBJECT (base_video_decoder, + "sync timestamp %" GST_TIME_FORMAT " diff %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->presentation_timestamp), GST_TIME_ARGS 
(frame->presentation_timestamp - GST_BASE_VIDEO_CODEC (base_video_decoder)->segment.start)); @@ -998,15 +1434,17 @@ gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, } else { /* This case is for one initial timestamp and no others, e.g., * filesrc ! decoder ! xvimagesink */ - GST_WARNING ("sync timestamp didn't change, ignoring"); + GST_WARNING_OBJECT (base_video_decoder, + "sync timestamp didn't change, ignoring"); frame->presentation_timestamp = GST_CLOCK_TIME_NONE; } } else { if (frame->is_sync_point) { - GST_WARNING ("sync point doesn't have timestamp"); + GST_WARNING_OBJECT (base_video_decoder, + "sync point doesn't have timestamp"); if (!GST_CLOCK_TIME_IS_VALID (base_video_decoder->timestamp_offset)) { - GST_WARNING - ("No base timestamp. Assuming frames start at segment start"); + GST_WARNING_OBJECT (base_video_decoder, + "No base timestamp. Assuming frames start at segment start"); base_video_decoder->timestamp_offset = GST_BASE_VIDEO_CODEC (base_video_decoder)->segment.start; base_video_decoder->field_index &= 1; @@ -1033,13 +1471,21 @@ gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, if (GST_CLOCK_TIME_IS_VALID (base_video_decoder->last_timestamp)) { if (frame->presentation_timestamp < base_video_decoder->last_timestamp) { - GST_WARNING ("decreasing timestamp (%" GST_TIME_FORMAT " < %" + GST_WARNING_OBJECT (base_video_decoder, + "decreasing timestamp (%" GST_TIME_FORMAT " < %" GST_TIME_FORMAT ")", GST_TIME_ARGS (frame->presentation_timestamp), GST_TIME_ARGS (base_video_decoder->last_timestamp)); } } base_video_decoder->last_timestamp = frame->presentation_timestamp; + /* no buffer data means this frame is skipped/dropped */ + if (!frame->src_buffer) { + GST_DEBUG_OBJECT (base_video_decoder, "skipping frame %" GST_TIME_FORMAT, + GST_TIME_ARGS (frame->presentation_timestamp)); + goto done; + } + src_buffer = gst_buffer_make_metadata_writable (frame->src_buffer); frame->src_buffer = NULL; @@ -1063,9 
+1509,9 @@ gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD); } } - if (base_video_decoder->discont) { + if (GST_BASE_VIDEO_CODEC (base_video_decoder)->discont) { GST_BUFFER_FLAG_SET (src_buffer, GST_BUFFER_FLAG_DISCONT); - base_video_decoder->discont = FALSE; + GST_BASE_VIDEO_CODEC (base_video_decoder)->discont = FALSE; } GST_BUFFER_TIMESTAMP (src_buffer) = frame->presentation_timestamp; @@ -1073,17 +1519,24 @@ gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, GST_BUFFER_OFFSET (src_buffer) = GST_BUFFER_OFFSET_NONE; GST_BUFFER_OFFSET_END (src_buffer) = GST_BUFFER_OFFSET_NONE; - GST_DEBUG ("pushing frame %" GST_TIME_FORMAT, - GST_TIME_ARGS (frame->presentation_timestamp)); - - GST_BASE_VIDEO_CODEC (base_video_decoder)->frames = - g_list_remove (GST_BASE_VIDEO_CODEC (base_video_decoder)->frames, frame); + /* update rate estimate */ + GST_BASE_VIDEO_CODEC (base_video_decoder)->bytes += + GST_BUFFER_SIZE (src_buffer); + if (GST_CLOCK_TIME_IS_VALID (frame->presentation_duration)) { + GST_BASE_VIDEO_CODEC (base_video_decoder)->time += + frame->presentation_duration; + } else { + /* better none than nothing valid */ + GST_BASE_VIDEO_CODEC (base_video_decoder)->time = GST_CLOCK_TIME_NONE; + } - gst_base_video_decoder_set_src_caps (base_video_decoder); gst_buffer_set_caps (src_buffer, GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder))); - gst_base_video_decoder_free_frame (frame); + GST_LOG_OBJECT (base_video_decoder, "pushing frame ts %" GST_TIME_FORMAT + ", duration %" GST_TIME_FORMAT, + GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer)), + GST_TIME_ARGS (GST_BUFFER_DURATION (src_buffer))); if (base_video_decoder->sink_clipping) { gint64 start = GST_BUFFER_TIMESTAMP (src_buffer); @@ -1094,7 +1547,8 @@ gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, 
&start, &stop)) { GST_BUFFER_TIMESTAMP (src_buffer) = start; GST_BUFFER_DURATION (src_buffer) = stop - start; - GST_DEBUG ("accepting buffer inside segment: %" GST_TIME_FORMAT + GST_LOG_OBJECT (base_video_decoder, + "accepting buffer inside segment: %" GST_TIME_FORMAT " %" GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT " time %" GST_TIME_FORMAT, @@ -1104,7 +1558,8 @@ gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time)); } else { - GST_DEBUG ("dropping buffer outside segment: %" GST_TIME_FORMAT + GST_LOG_OBJECT (base_video_decoder, + "dropping buffer outside segment: %" GST_TIME_FORMAT " %" GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT " time %" GST_TIME_FORMAT, @@ -1114,119 +1569,53 @@ gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time)); gst_buffer_unref (src_buffer); - return GST_FLOW_OK; + ret = GST_FLOW_OK; + goto done; } } - return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), - src_buffer); -} + /* we got data, so note things are looking up again */ + if (G_UNLIKELY (base_video_decoder->error_count)) + base_video_decoder->error_count--; -GstFlowReturn -gst_base_video_decoder_skip_frame (GstBaseVideoDecoder * base_video_decoder, - GstVideoFrame * frame) -{ - GST_DEBUG ("finish frame"); - GST_DEBUG ("finish frame sync=%d pts=%" GST_TIME_FORMAT, frame->is_sync_point, - GST_TIME_ARGS (frame->presentation_timestamp)); - - if (GST_CLOCK_TIME_IS_VALID (frame->presentation_timestamp)) { - if (frame->presentation_timestamp != base_video_decoder->timestamp_offset) { - GST_DEBUG ("sync timestamp %" GST_TIME_FORMAT " diff %" GST_TIME_FORMAT, - GST_TIME_ARGS (frame->presentation_timestamp), - GST_TIME_ARGS (frame->presentation_timestamp - - GST_BASE_VIDEO_CODEC 
(base_video_decoder)->segment.start)); - base_video_decoder->timestamp_offset = frame->presentation_timestamp; - base_video_decoder->field_index = 0; - } else { - /* This case is for one initial timestamp and no others, e.g., - * filesrc ! decoder ! xvimagesink */ - GST_WARNING ("sync timestamp didn't change, ignoring"); - frame->presentation_timestamp = GST_CLOCK_TIME_NONE; - } + if (GST_BASE_VIDEO_CODEC (base_video_decoder)->segment.rate < 0.0) { + GST_LOG_OBJECT (base_video_decoder, "queued buffer"); + base_video_decoder->queued = + g_list_prepend (base_video_decoder->queued, src_buffer); } else { - if (frame->is_sync_point) { - GST_WARNING ("sync point doesn't have timestamp"); - if (GST_CLOCK_TIME_IS_VALID (base_video_decoder->timestamp_offset)) { - GST_WARNING - ("No base timestamp. Assuming frames start at segment start"); - base_video_decoder->timestamp_offset = - GST_BASE_VIDEO_CODEC (base_video_decoder)->segment.start; - base_video_decoder->field_index = 0; - } - } + ret = gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), + src_buffer); } - frame->field_index = base_video_decoder->field_index; - base_video_decoder->field_index += frame->n_fields; - - if (frame->presentation_timestamp == GST_CLOCK_TIME_NONE) { - frame->presentation_timestamp = - gst_base_video_decoder_get_field_timestamp (base_video_decoder, - frame->field_index); - frame->presentation_duration = GST_CLOCK_TIME_NONE; - frame->decode_timestamp = - gst_base_video_decoder_get_timestamp (base_video_decoder, - frame->decode_frame_number); - } - if (frame->presentation_duration == GST_CLOCK_TIME_NONE) { - frame->presentation_duration = - gst_base_video_decoder_get_field_duration (base_video_decoder, - frame->n_fields); - } - - base_video_decoder->last_timestamp = frame->presentation_timestamp; - - GST_DEBUG ("skipping frame %" GST_TIME_FORMAT, - GST_TIME_ARGS (frame->presentation_timestamp)); +done: GST_BASE_VIDEO_CODEC (base_video_decoder)->frames = g_list_remove 
(GST_BASE_VIDEO_CODEC (base_video_decoder)->frames, frame); + gst_base_video_codec_free_frame (frame); - gst_base_video_decoder_free_frame (frame); - - return GST_FLOW_OK; -} - -int -gst_base_video_decoder_get_height (GstBaseVideoDecoder * base_video_decoder) -{ - GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; - - return state->height; -} - -int -gst_base_video_decoder_get_width (GstBaseVideoDecoder * base_video_decoder) -{ - GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; - - return state->width; -} - -GstFlowReturn -gst_base_video_decoder_end_of_stream (GstBaseVideoDecoder * base_video_decoder, - GstBuffer * buffer) -{ - - if (GST_BASE_VIDEO_CODEC (base_video_decoder)->frames) { - GST_DEBUG ("EOS with frames left over"); - } + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); - return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), - buffer); + return ret; } +/** + * gst_base_video_decoder_finish_frame: + * @base_video_decoder: a #GstBaseVideoDecoder + * @n_bytes: an encoded #GstVideoFrame + * + * Removes next @n_bytes of input data and adds it to currently parsed frame. 
+ */ void gst_base_video_decoder_add_to_frame (GstBaseVideoDecoder * base_video_decoder, int n_bytes) { GstBuffer *buf; - GST_DEBUG ("add to frame"); + GST_LOG_OBJECT (base_video_decoder, "add %d bytes to frame", n_bytes); if (n_bytes == 0) return; + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); if (gst_adapter_available (base_video_decoder->output_adapter) == 0) { base_video_decoder->frame_offset = base_video_decoder->input_offset - gst_adapter_available (base_video_decoder->input_adapter); @@ -1234,6 +1623,7 @@ gst_base_video_decoder_add_to_frame (GstBaseVideoDecoder * base_video_decoder, buf = gst_adapter_take_buffer (base_video_decoder->input_adapter, n_bytes); gst_adapter_push (base_video_decoder->output_adapter, buf); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); } static guint64 @@ -1242,7 +1632,7 @@ gst_base_video_decoder_get_timestamp (GstBaseVideoDecoder * base_video_decoder, { GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; - if (state->fps_d == 0) { + if (state->fps_d == 0 || state->fps_n == 0) { return -1; } if (picture_number < base_video_decoder->base_picture_number) { @@ -1263,11 +1653,11 @@ gst_base_video_decoder_get_field_timestamp (GstBaseVideoDecoder * { GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; - if (state->fps_d == 0) { + if (state->fps_d == 0 || state->fps_n == 0) { return GST_CLOCK_TIME_NONE; } if (field_offset < 0) { - GST_WARNING ("field offset < 0"); + GST_WARNING_OBJECT (base_video_decoder, "field offset < 0"); return GST_CLOCK_TIME_NONE; } return base_video_decoder->timestamp_offset + @@ -1281,18 +1671,26 @@ gst_base_video_decoder_get_field_duration (GstBaseVideoDecoder * { GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; - if (state->fps_d == 0) { + if (state->fps_d == 0 || state->fps_n == 0) { return GST_CLOCK_TIME_NONE; } if (n_fields < 0) { - GST_WARNING ("n_fields < 0"); + GST_WARNING_OBJECT (base_video_decoder, "n_fields < 0"); 
return GST_CLOCK_TIME_NONE; } return gst_util_uint64_scale (n_fields, state->fps_d * GST_SECOND, state->fps_n * 2); } - +/** + * gst_base_video_decoder_have_frame: + * @base_video_decoder: a #GstBaseVideoDecoder + * + * Gathers all data collected for currently parsed frame, gathers corresponding + * metadata and passes it along for further processing, i.e. @handle_frame. + * + * Returns: a #GstFlowReturn + */ GstFlowReturn gst_base_video_decoder_have_frame (GstBaseVideoDecoder * base_video_decoder) { @@ -1300,8 +1698,11 @@ gst_base_video_decoder_have_frame (GstBaseVideoDecoder * base_video_decoder) int n_available; GstClockTime timestamp; GstClockTime duration; + GstFlowReturn ret = GST_FLOW_OK; + + GST_LOG_OBJECT (base_video_decoder, "have_frame"); - GST_DEBUG ("have_frame"); + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); n_available = gst_adapter_available (base_video_decoder->output_adapter); if (n_available) { @@ -1319,7 +1720,15 @@ gst_base_video_decoder_have_frame (GstBaseVideoDecoder * base_video_decoder) GST_BUFFER_TIMESTAMP (buffer) = timestamp; GST_BUFFER_DURATION (buffer) = duration; - return gst_base_video_decoder_have_frame_2 (base_video_decoder); + GST_LOG_OBJECT (base_video_decoder, "collected frame size %d, " + "ts %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, + n_available, GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration)); + + ret = gst_base_video_decoder_have_frame_2 (base_video_decoder); + + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); + + return ret; } static GstFlowReturn @@ -1332,16 +1741,28 @@ gst_base_video_decoder_have_frame_2 (GstBaseVideoDecoder * base_video_decoder) base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); + g_return_val_if_fail (base_video_decoder_class->handle_frame != NULL, + GST_FLOW_ERROR); + + /* capture frames and queue for later processing */ + if (GST_BASE_VIDEO_CODEC (base_video_decoder)->segment.rate < 0.0 && + !base_video_decoder->process) { + 
base_video_decoder->parse_gather = + g_list_prepend (base_video_decoder->parse_gather, frame); + goto exit; + } + frame->distance_from_sync = base_video_decoder->distance_from_sync; base_video_decoder->distance_from_sync++; frame->presentation_timestamp = GST_BUFFER_TIMESTAMP (frame->sink_buffer); frame->presentation_duration = GST_BUFFER_DURATION (frame->sink_buffer); - GST_DEBUG ("pts %" GST_TIME_FORMAT, + GST_LOG_OBJECT (base_video_decoder, "pts %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->presentation_timestamp)); - GST_DEBUG ("dts %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->decode_timestamp)); - GST_DEBUG ("dist %d", frame->distance_from_sync); + GST_LOG_OBJECT (base_video_decoder, "dts %" GST_TIME_FORMAT, + GST_TIME_ARGS (frame->decode_timestamp)); + GST_LOG_OBJECT (base_video_decoder, "dist %d", frame->distance_from_sync); GST_BASE_VIDEO_CODEC (base_video_decoder)->frames = g_list_append (GST_BASE_VIDEO_CODEC (base_video_decoder)->frames, frame); @@ -1354,9 +1775,11 @@ gst_base_video_decoder_have_frame_2 (GstBaseVideoDecoder * base_video_decoder) /* do something with frame */ ret = base_video_decoder_class->handle_frame (base_video_decoder, frame); if (ret != GST_FLOW_OK) { - GST_DEBUG ("flow error!"); + GST_DEBUG_OBJECT (base_video_decoder, "flow error %s", + gst_flow_get_name (ret)); } +exit: /* create new frame */ base_video_decoder->current_frame = gst_base_video_decoder_new_frame (base_video_decoder); @@ -1364,84 +1787,139 @@ gst_base_video_decoder_have_frame_2 (GstBaseVideoDecoder * base_video_decoder) return ret; } +/** + * gst_base_video_decoder_get_state: + * @base_video_decoder: a #GstBaseVideoDecoder + * + * Returns: #GstVideoState describing format of video data. 
+ */ GstVideoState * gst_base_video_decoder_get_state (GstBaseVideoDecoder * base_video_decoder) { return &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; - -} - -void -gst_base_video_decoder_set_state (GstBaseVideoDecoder * base_video_decoder, - GstVideoState * state) -{ - memcpy (&GST_BASE_VIDEO_CODEC (base_video_decoder)->state, - state, sizeof (*state)); - } +/** + * gst_base_video_decoder_lost_sync: + * @base_video_decoder: a #GstBaseVideoDecoder + * + * Advances out-of-sync input data by 1 byte and marks it accordingly. + */ void gst_base_video_decoder_lost_sync (GstBaseVideoDecoder * base_video_decoder) { g_return_if_fail (GST_IS_BASE_VIDEO_DECODER (base_video_decoder)); - GST_DEBUG ("lost_sync"); + GST_DEBUG_OBJECT (base_video_decoder, "lost_sync"); + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); if (gst_adapter_available (base_video_decoder->input_adapter) >= 1) { gst_adapter_flush (base_video_decoder->input_adapter, 1); } base_video_decoder->have_sync = FALSE; + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); } +/* FIXME not quite exciting; get rid of this ? */ +/** + * gst_base_video_decoder_set_sync_point: + * @base_video_decoder: a #GstBaseVideoDecoder + * + * Marks current frame as a sync point, i.e. keyframe. + */ void gst_base_video_decoder_set_sync_point (GstBaseVideoDecoder * base_video_decoder) { - GST_DEBUG ("set_sync_point"); + GST_DEBUG_OBJECT (base_video_decoder, "set_sync_point"); + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); base_video_decoder->current_frame->is_sync_point = TRUE; base_video_decoder->distance_from_sync = 0; + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); } +/** + * gst_base_video_decoder_get_oldest_frame: + * @base_video_decoder: a #GstBaseVideoDecoder + * + * Returns: oldest pending unfinished #GstVideoFrame. 
+ */ GstVideoFrame * gst_base_video_decoder_get_oldest_frame (GstBaseVideoDecoder * base_video_decoder) { GList *g; + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); g = g_list_first (GST_BASE_VIDEO_CODEC (base_video_decoder)->frames); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); if (g == NULL) return NULL; return (GstVideoFrame *) (g->data); } +/** + * gst_base_video_decoder_get_frame: + * @base_video_decoder: a #GstBaseVideoDecoder + * @frame_number: system_frame_number of a frame + * + * Returns: pending unfinished #GstVideoFrame identified by @frame_number. + */ GstVideoFrame * gst_base_video_decoder_get_frame (GstBaseVideoDecoder * base_video_decoder, int frame_number) { GList *g; + GstVideoFrame *frame = NULL; + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); for (g = g_list_first (GST_BASE_VIDEO_CODEC (base_video_decoder)->frames); g; g = g_list_next (g)) { - GstVideoFrame *frame = g->data; + GstVideoFrame *tmp = g->data; if (frame->system_frame_number == frame_number) { - return frame; + frame = tmp; + break; } } + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); - return NULL; + return frame; } -void +/** + * gst_base_video_decoder_set_src_caps: + * @base_video_decoder: a #GstBaseVideoDecoder + * + * Sets src pad caps according to currently configured #GstVideoState. 
+ * + */ +gboolean gst_base_video_decoder_set_src_caps (GstBaseVideoDecoder * base_video_decoder) { GstCaps *caps; GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; + gboolean ret; - if (GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder)) != NULL) - return; + /* minimum sense */ + g_return_val_if_fail (state->format != GST_VIDEO_FORMAT_UNKNOWN, FALSE); + g_return_val_if_fail (state->width != 0, FALSE); + g_return_val_if_fail (state->height != 0, FALSE); + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); + + /* sanitize */ + if (state->fps_n == 0 || state->fps_d == 0) { + state->fps_n = 0; + state->fps_d = 1; + } + if (state->par_n == 0 || state->par_d == 0) { + state->par_n = 1; + state->par_d = 1; + } caps = gst_video_format_new_caps (state->format, state->width, state->height, @@ -1449,28 +1927,46 @@ gst_base_video_decoder_set_src_caps (GstBaseVideoDecoder * base_video_decoder) gst_caps_set_simple (caps, "interlaced", G_TYPE_BOOLEAN, state->interlaced, NULL); - GST_DEBUG ("setting caps %" GST_PTR_FORMAT, caps); - - gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), caps); + GST_DEBUG_OBJECT (base_video_decoder, "setting caps %" GST_PTR_FORMAT, caps); + ret = + gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), + caps); gst_caps_unref (caps); + + /* arrange for derived info */ + state->bytes_per_picture = + gst_video_format_get_size (state->format, state->width, state->height); + + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); + + return ret; } +/** + * gst_base_video_decoder_alloc_src_buffer: + * @base_video_decoder: a #GstBaseVideoDecoder + * + * Helper function that uses gst_pad_alloc_buffer_and_set_caps + * to allocate a buffer to hold a video frame for @base_video_decoder's + * current #GstVideoState. 
+ * + * Returns: allocated buffer + */ GstBuffer * gst_base_video_decoder_alloc_src_buffer (GstBaseVideoDecoder * base_video_decoder) { GstBuffer *buffer; GstFlowReturn flow_ret; - int num_bytes; GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; + int num_bytes = state->bytes_per_picture; - gst_base_video_decoder_set_src_caps (base_video_decoder); - - num_bytes = gst_video_format_get_size (state->format, state->width, - state->height); GST_DEBUG ("alloc src buffer caps=%" GST_PTR_FORMAT, GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder))); + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); + flow_ret = gst_pad_alloc_buffer_and_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), GST_BUFFER_OFFSET_NONE, num_bytes, @@ -1483,21 +1979,37 @@ gst_base_video_decoder_alloc_src_buffer (GstBaseVideoDecoder * GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder))); } + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); return buffer; } +/** + * gst_base_video_decoder_alloc_src_frame: + * @base_video_decoder: a #GstBaseVideoDecoder + * @frame: a #GstVideoFrame + * + * Helper function that uses gst_pad_alloc_buffer_and_set_caps + * to allocate a buffer to hold a video frame for @base_video_decoder's + * current #GstVideoState. Subclass should already have configured video state + * and set src pad caps. 
+ * + * Returns: result from pad alloc call + */ GstFlowReturn gst_base_video_decoder_alloc_src_frame (GstBaseVideoDecoder * base_video_decoder, GstVideoFrame * frame) { GstFlowReturn flow_ret; - int num_bytes; GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state; + int num_bytes = state->bytes_per_picture; + + g_return_val_if_fail (state->bytes_per_picture != 0, GST_FLOW_ERROR); + g_return_val_if_fail (GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD + (base_video_decoder)) != NULL, GST_FLOW_ERROR); - gst_base_video_decoder_set_src_caps (base_video_decoder); + GST_LOG_OBJECT (base_video_decoder, "alloc buffer size %d", num_bytes); + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder); - num_bytes = gst_video_format_get_size (state->format, state->width, - state->height); flow_ret = gst_pad_alloc_buffer_and_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), GST_BUFFER_OFFSET_NONE, num_bytes, @@ -1505,12 +2017,27 @@ gst_base_video_decoder_alloc_src_frame (GstBaseVideoDecoder * &frame->src_buffer); if (flow_ret != GST_FLOW_OK) { - GST_WARNING ("failed to get buffer"); + GST_WARNING_OBJECT (base_video_decoder, "failed to get buffer %s", + gst_flow_get_name (flow_ret)); } + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder); + return flow_ret; } +/** + * gst_base_video_decoder_get_max_decode_time: + * @base_video_decoder: a #GstBaseVideoDecoder + * @frame: a #GstVideoFrame + * + * Determines maximum possible decoding time for @frame that will + * allow it to decode and arrive in time (as determined by QoS messages). + * In particular, a negative result means decoding in time is no longer possible + * and should therefore occur as soon/skippy as possible. + * + * Returns: max decoding time. 
+ */ GstClockTimeDiff gst_base_video_decoder_get_max_decode_time (GstBaseVideoDecoder * base_video_decoder, GstVideoFrame * frame) @@ -1518,21 +2045,59 @@ gst_base_video_decoder_get_max_decode_time (GstBaseVideoDecoder * GstClockTimeDiff deadline; GstClockTime earliest_time; + GST_OBJECT_LOCK (base_video_decoder); earliest_time = GST_BASE_VIDEO_CODEC (base_video_decoder)->earliest_time; if (GST_CLOCK_TIME_IS_VALID (earliest_time)) deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline); else deadline = G_MAXINT64; + GST_LOG_OBJECT (base_video_decoder, "earliest %" GST_TIME_FORMAT + ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_TIME_FORMAT, + GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline), + GST_TIME_ARGS (deadline)); + + GST_OBJECT_UNLOCK (base_video_decoder); + return deadline; } +/** + * gst_base_video_decoder_get_oldest_frame: + * @base_video_decoder_class: a #GstBaseVideoDecoderClass + * + * Sets the mask and pattern that will be scanned for to obtain parse sync. + * Note that a non-zero @mask implies that @scan_for_sync will be ignored. 
+ * + */ void gst_base_video_decoder_class_set_capture_pattern (GstBaseVideoDecoderClass * base_video_decoder_class, guint32 mask, guint32 pattern) { g_return_if_fail (((~mask) & pattern) == 0); + GST_DEBUG ("capture mask %08x, pattern %08x", mask, pattern); + base_video_decoder_class->capture_mask = mask; base_video_decoder_class->capture_pattern = pattern; } + +GstFlowReturn +_gst_base_video_decoder_error (GstBaseVideoDecoder * dec, gint weight, + GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file, + const gchar * function, gint line) +{ + if (txt) + GST_WARNING_OBJECT (dec, "error: %s", txt); + if (dbg) + GST_WARNING_OBJECT (dec, "error: %s", dbg); + dec->error_count += weight; + GST_BASE_VIDEO_CODEC (dec)->discont = TRUE; + if (dec->max_errors < dec->error_count) { + gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR, + domain, code, txt, dbg, file, function, line); + return GST_FLOW_ERROR; + } else { + return GST_FLOW_OK; + } +} diff --git a/gst-libs/gst/video/gstbasevideodecoder.h b/gst-libs/gst/video/gstbasevideodecoder.h index ff3f9fe..98c29e5 100644 --- a/gst-libs/gst/video/gstbasevideodecoder.h +++ b/gst-libs/gst/video/gstbasevideodecoder.h @@ -1,5 +1,8 @@ /* GStreamer * Copyright (C) 2008 David Schleef <ds@schleef.org> + * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>. + * Copyright (C) 2011 Nokia Corporation. All rights reserved. + * Contact: Stefan Kost <stefan.kost@nokia.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public @@ -56,125 +59,216 @@ G_BEGIN_DECLS #define GST_BASE_VIDEO_DECODER_SRC_NAME "src" /** - * * GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA: - * * - * */ + * GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA: + * + * Returned while parsing to indicate more data is needed. 
+ **/ #define GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS +/** + * GST_BASE_VIDEO_DECODER_FLOW_DROPPED: + * + * Returned when the event/buffer should be dropped. + */ +#define GST_BASE_VIDEO_DECODER_FLOW_DROPPED GST_FLOW_CUSTOM_SUCCESS_1 typedef struct _GstBaseVideoDecoder GstBaseVideoDecoder; typedef struct _GstBaseVideoDecoderClass GstBaseVideoDecoderClass; + +/* do not use this one, use macro below */ +GstFlowReturn _gst_base_video_decoder_error (GstBaseVideoDecoder *dec, gint weight, + GQuark domain, gint code, + gchar *txt, gchar *debug, + const gchar *file, const gchar *function, + gint line); + +/** + * GST_BASE_VIDEO_DECODER_ERROR: + * @el: the base video decoder element that generates the error + * @weight: element defined weight of the error, added to error count + * @domain: like CORE, LIBRARY, RESOURCE or STREAM (see #gstreamer-GstGError) + * @code: error code defined for that domain (see #gstreamer-GstGError) + * @text: the message to display (format string and args enclosed in + * parentheses) + * @debug: debugging information for the message (format string and args + * enclosed in parentheses) + * @ret: variable to receive return value + * + * Utility function that audio decoder elements can use in case they encountered + * a data processing error that may be fatal for the current "data unit" but + * need not prevent subsequent decoding. Such errors are counted and if there + * are too many, as configured in the context's max_errors, the pipeline will + * post an error message and the application will be requested to stop further + * media processing. Otherwise, it is considered a "glitch" and only a warning + * is logged. In either case, @ret is set to the proper value to + * return to upstream/caller (indicating either GST_FLOW_ERROR or GST_FLOW_OK). 
+ */ +#define GST_BASE_AUDIO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \ +G_STMT_START { \ + gchar *__txt = _gst_element_error_printf text; \ + gchar *__dbg = _gst_element_error_printf debug; \ + GstBaseVideoDecoder *dec = GST_BASE_VIDEO_DECODER (el); \ + ret = _gst_base_video_decoder_error (dec, w, GST_ ## domain ## _ERROR, \ + GST_ ## domain ## _ERROR_ ## code, __txt, __dbg, __FILE__, \ + GST_FUNCTION, __LINE__); \ +} G_STMT_END + + +/** + * GstBaseVideoDecoder: + * + * The opaque #GstBaseVideoDecoder data structure. + */ struct _GstBaseVideoDecoder { GstBaseVideoCodec base_video_codec; - /*< private >*/ - GstAdapter *input_adapter; - GstAdapter *output_adapter; + /*< protected >*/ + gboolean sink_clipping; + gboolean do_byte_time; + gboolean packetized; + gint max_errors; - gboolean have_sync; - gboolean discont; - gboolean started; + /* parse tracking */ + /* input data */ + GstAdapter *input_adapter; + /* assembles current frame */ + GstAdapter *output_adapter; - gboolean sink_clipping; + /*< private >*/ + /* FIXME move to real private part ? + * (and introduce a context ?) */ + /* ... being tracked here; + * only available during parsing */ + /* FIXME remove and add parameter to method */ + GstVideoFrame *current_frame; + /* events that should apply to the current frame */ + GList *current_frame_events; + /* relative offset of input data */ + guint64 input_offset; + /* relative offset of frame */ + guint64 frame_offset; + /* tracking ts and offsets */ + GList *timestamps; + /* whether parsing is in sync */ + gboolean have_sync; + + /* maybe sort-of protected ? 
*/ + + /* combine to yield (presentation) ts */ + GstClockTime timestamp_offset; + int field_index; + + /* last outgoing ts */ + GstClockTime last_timestamp; + gint error_count; + + /* reverse playback */ + /* collect input */ + GList *gather; + /* to-be-parsed */ + GList *parse; + /* collected parsed frames */ + GList *parse_gather; + /* frames to be handled == decoded */ + GList *decode; + /* collected output */ + GList *queued; + gboolean process; + + /* no comment ... */ + guint64 base_picture_number; + int reorder_depth; + int distance_from_sync; - guint64 presentation_frame_number; + /* FIXME before moving to base */ + void *padding[GST_PADDING_LARGE]; +}; - gboolean have_src_caps; +/** + * GstBaseAudioDecoderClass: + * @start: Optional. + * Called when the element starts processing. + * Allows opening external resources. + * @stop: Optional. + * Called when the element stops processing. + * Allows closing external resources. + * @set_format: Notifies subclass of incoming data format (caps). + * @scan_for_sync: Optional. + * Allows subclass to obtain sync for subsequent parsing + * by custom means (above an beyond scanning for specific + * marker and mask). + * @parse_data: Required for non-packetized input. + * Allows chopping incoming data into manageable units (frames) + * for subsequent decoding. + * @reset: Optional. + * Allows subclass (codec) to perform post-seek semantics reset. + * @handle_frame: Provides input data frame to subclass. + * @finish: Optional. + * Called to request subclass to dispatch any pending remaining + * data (e.g. at EOS). + * + * Subclasses can override any of the available virtual methods or not, as + * needed. At minimum @handle_frame needs to be overridden, and @set_format + * and likely as well. If non-packetized input is supported or expected, + * @parse needs to be overridden as well. 
+ */ +struct _GstBaseVideoDecoderClass +{ + GstBaseVideoCodecClass base_video_codec_class; - GstVideoFrame *current_frame; + gboolean (*start) (GstBaseVideoDecoder *coder); - int distance_from_sync; - int reorder_depth; + gboolean (*stop) (GstBaseVideoDecoder *coder); - GstClockTime buffer_timestamp; + int (*scan_for_sync) (GstBaseVideoDecoder *decoder, gboolean at_eos, + int offset, int n); - GstClockTime timestamp_offset; + GstFlowReturn (*parse_data) (GstBaseVideoDecoder *decoder, gboolean at_eos); - //GstBuffer *codec_data; + gboolean (*set_format) (GstBaseVideoDecoder *coder, GstVideoState * state); - guint64 input_offset; - guint64 frame_offset; - GstClockTime last_timestamp; + gboolean (*reset) (GstBaseVideoDecoder *coder); - guint64 base_picture_number; + GstFlowReturn (*finish) (GstBaseVideoDecoder *coder); - int field_index; + GstFlowReturn (*handle_frame) (GstBaseVideoDecoder *coder, GstVideoFrame *frame); - gboolean is_delta_unit; - gboolean packetized; - GList *timestamps; - gboolean have_segment; + /*< private >*/ + guint32 capture_mask; + guint32 capture_pattern; /* FIXME before moving to base */ - void *padding[GST_PADDING_LARGE]; + void *padding[GST_PADDING_LARGE]; }; -struct _GstBaseVideoDecoderClass -{ - GstBaseVideoCodecClass base_video_codec_class; - - gboolean (*set_format) (GstBaseVideoDecoder *coder, GstVideoFormat, - int width, int height, int fps_n, int fps_d, - int par_n, int par_d); - gboolean (*start) (GstBaseVideoDecoder *coder); - gboolean (*stop) (GstBaseVideoDecoder *coder); - gboolean (*reset) (GstBaseVideoDecoder *coder); - int (*scan_for_sync) (GstBaseVideoDecoder *decoder, gboolean at_eos, - int offset, int n); - GstFlowReturn (*parse_data) (GstBaseVideoDecoder *decoder, gboolean at_eos); - GstFlowReturn (*finish) (GstBaseVideoDecoder *coder); - GstFlowReturn (*handle_frame) (GstBaseVideoDecoder *coder, GstVideoFrame *frame); - GstFlowReturn (*shape_output) (GstBaseVideoDecoder *coder, GstVideoFrame *frame); - GstCaps 
*(*get_caps) (GstBaseVideoDecoder *coder); - - guint32 capture_mask; - guint32 capture_pattern; +void gst_base_video_decoder_class_set_capture_pattern (GstBaseVideoDecoderClass *klass, + guint32 mask, guint32 pattern); - /* FIXME before moving to base */ - void *padding[GST_PADDING_LARGE]; -}; +GstVideoFrame *gst_base_video_decoder_get_frame (GstBaseVideoDecoder *coder, + int frame_number); +GstVideoFrame *gst_base_video_decoder_get_oldest_frame (GstBaseVideoDecoder *coder); -GType gst_base_video_decoder_get_type (void); - -void gst_base_video_decoder_class_set_capture_pattern (GstBaseVideoDecoderClass *klass, - guint32 mask, guint32 pattern); - -int gst_base_video_decoder_get_width (GstBaseVideoDecoder *coder); -int gst_base_video_decoder_get_height (GstBaseVideoDecoder *coder); - -guint64 gst_base_video_decoder_get_timestamp_offset (GstBaseVideoDecoder *coder); - -GstVideoFrame *gst_base_video_decoder_get_frame (GstBaseVideoDecoder *coder, - int frame_number); -GstVideoFrame *gst_base_video_decoder_get_oldest_frame (GstBaseVideoDecoder *coder); -void gst_base_video_decoder_add_to_frame (GstBaseVideoDecoder *base_video_decoder, - int n_bytes); -GstFlowReturn gst_base_video_decoder_finish_frame (GstBaseVideoDecoder *base_video_decoder, - GstVideoFrame *frame); -GstFlowReturn gst_base_video_decoder_skip_frame (GstBaseVideoDecoder * base_video_decoder, - GstVideoFrame * frame); -GstFlowReturn gst_base_video_decoder_end_of_stream (GstBaseVideoDecoder *base_video_decoder, - GstBuffer *buffer); -GstFlowReturn -gst_base_video_decoder_have_frame (GstBaseVideoDecoder *base_video_decoder); -GstVideoState * gst_base_video_decoder_get_state (GstBaseVideoDecoder *base_video_decoder); -void gst_base_video_decoder_set_state (GstBaseVideoDecoder *base_video_decoder, - GstVideoState *state); -void gst_base_video_decoder_lost_sync (GstBaseVideoDecoder *base_video_decoder); -void gst_base_video_decoder_set_sync_point (GstBaseVideoDecoder *base_video_decoder); - -void 
gst_base_video_decoder_set_src_caps (GstBaseVideoDecoder *base_video_decoder); - -GstBuffer * gst_base_video_decoder_alloc_src_buffer (GstBaseVideoDecoder * - base_video_decoder); -GstFlowReturn gst_base_video_decoder_alloc_src_frame (GstBaseVideoDecoder *base_video_decoder, - GstVideoFrame *frame); +void gst_base_video_decoder_add_to_frame (GstBaseVideoDecoder *base_video_decoder, + int n_bytes); +void gst_base_video_decoder_lost_sync (GstBaseVideoDecoder *base_video_decoder); +GstFlowReturn gst_base_video_decoder_have_frame (GstBaseVideoDecoder *base_video_decoder); +void gst_base_video_decoder_set_sync_point (GstBaseVideoDecoder *base_video_decoder); +gboolean gst_base_video_decoder_set_src_caps (GstBaseVideoDecoder *base_video_decoder); +GstBuffer *gst_base_video_decoder_alloc_src_buffer (GstBaseVideoDecoder * base_video_decoder); +GstFlowReturn gst_base_video_decoder_alloc_src_frame (GstBaseVideoDecoder *base_video_decoder, + GstVideoFrame *frame); +GstVideoState *gst_base_video_decoder_get_state (GstBaseVideoDecoder *base_video_decoder); GstClockTimeDiff gst_base_video_decoder_get_max_decode_time ( - GstBaseVideoDecoder *base_video_decoder, GstVideoFrame *frame); + GstBaseVideoDecoder *base_video_decoder, + GstVideoFrame *frame); +GstFlowReturn gst_base_video_decoder_finish_frame (GstBaseVideoDecoder *base_video_decoder, + GstVideoFrame *frame); + +GType gst_base_video_decoder_get_type (void); G_END_DECLS diff --git a/gst-libs/gst/video/gstbasevideoencoder.c b/gst-libs/gst/video/gstbasevideoencoder.c index 7926f53..70c20b2 100644 --- a/gst-libs/gst/video/gstbasevideoencoder.c +++ b/gst-libs/gst/video/gstbasevideoencoder.c @@ -1,5 +1,8 @@ /* GStreamer * Copyright (C) 2008 David Schleef <ds@schleef.org> + * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>. + * Copyright (C) 2011 Nokia Corporation. All rights reserved. 
+ * Contact: Stefan Kost <stefan.kost@nokia.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public @@ -17,11 +20,95 @@ * Boston, MA 02111-1307, USA. */ +/** + * SECTION:gstbasevideoencoder + * @short_description: Base class for video encoders + * @see_also: #GstBaseTransform + * + * This base class is for video encoders turning raw video into + * encoded video data. + * + * GstBaseVideoEncoder and subclass should cooperate as follows. + * <orderedlist> + * <listitem> + * <itemizedlist><title>Configuration</title> + * <listitem><para> + * Initially, GstBaseVideoEncoder calls @start when the encoder element + * is activated, which allows subclass to perform any global setup. + * </para></listitem> + * <listitem><para> + * GstBaseVideoEncoder calls @set_format to inform subclass of the format + * of input video data that it is about to receive. Subclass should + * setup for encoding and configure base class as appropriate + * (e.g. latency). While unlikely, it might be called more than once, + * if changing input parameters require reconfiguration. Baseclass + * will ensure that processing of current configuration is finished. + * </para></listitem> + * <listitem><para> + * GstBaseVideoEncoder calls @stop at end of all processing. + * </para></listitem> + * </itemizedlist> + * </listitem> + * <listitem> + * <itemizedlist> + * <title>Data processing</title> + * <listitem><para> + * Base class collects input data and metadata into a frame and hands + * this to subclass' @handle_frame. + * </para></listitem> + * <listitem><para> + * If codec processing results in encoded data, subclass should call + * @gst_base_video_encoder_finish_frame to have encoded data pushed + * downstream. + * </para></listitem> + * <listitem><para> + * If implemented, baseclass calls subclass @shape_output which then sends + * data downstream in desired form. Otherwise, it is sent as-is. 
+ * </para></listitem> + * <listitem><para> + * GstBaseVideoEncoderClass will handle both srcpad and sinkpad events. + * Sink events will be passed to subclass if @event callback has been + * provided. + * </para></listitem> + * </itemizedlist> + * </listitem> + * <listitem> + * <itemizedlist><title>Shutdown phase</title> + * <listitem><para> + * GstBaseVideoEncoder class calls @stop to inform the subclass that data + * parsing will be stopped. + * </para></listitem> + * </itemizedlist> + * </listitem> + * </orderedlist> + * + * Subclass is responsible for providing pad template caps for + * source and sink pads. The pads need to be named "sink" and "src". It should + * also be able to provide fixed src pad caps in @getcaps by the time it calls + * @gst_base_video_encoder_finish_frame. + * + * Things that subclass need to take care of: + * <itemizedlist> + * <listitem><para>Provide pad templates</para></listitem> + * <listitem><para> + * Provide source pad caps before pushing the first buffer + * </para></listitem> + * <listitem><para> + * Accept data in @handle_frame and provide encoded results to + * @gst_base_video_encoder_finish_frame. 
+ * </para></listitem> + * </itemizedlist> + * + */ + #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "gstbasevideoencoder.h" +#include "gstbasevideoutils.h" + +#include <string.h> GST_DEBUG_CATEGORY (basevideoencoder_debug); #define GST_CAT_DEFAULT basevideoencoder_debug @@ -30,13 +117,13 @@ static void gst_base_video_encoder_finalize (GObject * object); static gboolean gst_base_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps); +static GstCaps *gst_base_video_encoder_sink_getcaps (GstPad * pad); static gboolean gst_base_video_encoder_src_event (GstPad * pad, GstEvent * event); static gboolean gst_base_video_encoder_sink_event (GstPad * pad, GstEvent * event); static GstFlowReturn gst_base_video_encoder_chain (GstPad * pad, GstBuffer * buf); -//static GstFlowReturn gst_base_video_encoder_process (GstBaseVideoEncoder *base_video_encoder); static GstStateChangeReturn gst_base_video_encoder_change_state (GstElement * element, GstStateChange transition); static const GstQueryType *gst_base_video_encoder_get_query_types (GstPad * @@ -45,8 +132,21 @@ static gboolean gst_base_video_encoder_src_query (GstPad * pad, GstQuery * query); -GST_BOILERPLATE (GstBaseVideoEncoder, gst_base_video_encoder, GstBaseVideoCodec, - GST_TYPE_BASE_VIDEO_CODEC); +static void +_do_init (GType object_type) +{ + const GInterfaceInfo preset_interface_info = { + NULL, /* interface_init */ + NULL, /* interface_finalize */ + NULL /* interface_data */ + }; + + g_type_add_interface_static (object_type, GST_TYPE_PRESET, + &preset_interface_info); +} + +GST_BOILERPLATE_FULL (GstBaseVideoEncoder, gst_base_video_encoder, + GstBaseVideoCodec, GST_TYPE_BASE_VIDEO_CODEC, _do_init); static void gst_base_video_encoder_base_init (gpointer g_class) @@ -67,9 +167,34 @@ gst_base_video_encoder_class_init (GstBaseVideoEncoderClass * klass) gobject_class->finalize = gst_base_video_encoder_finalize; - gstelement_class->change_state = gst_base_video_encoder_change_state; + 
gstelement_class->change_state = + GST_DEBUG_FUNCPTR (gst_base_video_encoder_change_state); +} + +static void +gst_base_video_encoder_reset (GstBaseVideoEncoder * base_video_encoder) +{ + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder); + + base_video_encoder->presentation_frame_number = 0; + base_video_encoder->distance_from_sync = 0; + base_video_encoder->force_keyframe = FALSE; + + base_video_encoder->drained = TRUE; + base_video_encoder->min_latency = 0; + base_video_encoder->max_latency = 0; + + if (base_video_encoder->force_keyunit_event) { + gst_event_unref (base_video_encoder->force_keyunit_event); + base_video_encoder->force_keyunit_event = NULL; + } + + g_list_foreach (base_video_encoder->current_frame_events, + (GFunc) gst_event_unref, NULL); + g_list_free (base_video_encoder->current_frame_events); + base_video_encoder->current_frame_events = NULL; - parent_class = g_type_class_peek_parent (klass); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); } static void @@ -78,22 +203,69 @@ gst_base_video_encoder_init (GstBaseVideoEncoder * base_video_encoder, { GstPad *pad; - GST_DEBUG ("gst_base_video_encoder_init"); + GST_DEBUG_OBJECT (base_video_encoder, "gst_base_video_encoder_init"); pad = GST_BASE_VIDEO_CODEC_SINK_PAD (base_video_encoder); - gst_pad_set_chain_function (pad, gst_base_video_encoder_chain); - gst_pad_set_event_function (pad, gst_base_video_encoder_sink_event); - gst_pad_set_setcaps_function (pad, gst_base_video_encoder_sink_setcaps); - //gst_pad_set_query_function (pad, gst_base_video_encoder_sink_query); + gst_pad_set_chain_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_encoder_chain)); + gst_pad_set_event_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_encoder_sink_event)); + gst_pad_set_setcaps_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_encoder_sink_setcaps)); + gst_pad_set_getcaps_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_encoder_sink_getcaps)); pad = GST_BASE_VIDEO_CODEC_SRC_PAD 
(base_video_encoder); - gst_pad_set_query_type_function (pad, gst_base_video_encoder_get_query_types); - gst_pad_set_query_function (pad, gst_base_video_encoder_src_query); - gst_pad_set_event_function (pad, gst_base_video_encoder_src_event); + gst_pad_set_query_type_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_encoder_get_query_types)); + gst_pad_set_query_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_encoder_src_query)); + gst_pad_set_event_function (pad, + GST_DEBUG_FUNCPTR (gst_base_video_encoder_src_event)); base_video_encoder->a.at_eos = FALSE; + + /* encoder is expected to do so */ + base_video_encoder->sink_clipping = TRUE; +} + +static gboolean +gst_base_video_encoder_drain (GstBaseVideoEncoder * enc) +{ + GstBaseVideoCodec *codec; + GstBaseVideoEncoderClass *enc_class; + gboolean ret = TRUE; + + codec = GST_BASE_VIDEO_CODEC (enc); + enc_class = GST_BASE_VIDEO_ENCODER_GET_CLASS (enc); + + GST_DEBUG_OBJECT (enc, "draining"); + + if (enc->drained) { + GST_DEBUG_OBJECT (enc, "already drained"); + return TRUE; + } + + if (enc_class->reset) { + GST_DEBUG_OBJECT (enc, "requesting subclass to finish"); + ret = enc_class->reset (enc); + } + /* everything should be away now */ + if (codec->frames) { + /* not fatal/impossible though if subclass/codec eats stuff */ + GST_WARNING_OBJECT (enc, "still %d frames left after draining", + g_list_length (codec->frames)); +#if 0 + /* FIXME should do this, but subclass may come up with it later on ? 
+ * and would then need refcounting or so on frames */ + g_list_foreach (codec->frames, + (GFunc) gst_base_video_codec_free_frame, NULL); +#endif + } + + return ret; } static gboolean @@ -102,45 +274,92 @@ gst_base_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps) GstBaseVideoEncoder *base_video_encoder; GstBaseVideoEncoderClass *base_video_encoder_class; GstStructure *structure; - GstVideoState *state; + GstVideoState *state, tmp_state; gboolean ret; + gboolean changed = FALSE; base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); base_video_encoder_class = GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder); - GST_DEBUG ("setcaps"); + /* subclass should do something here ... */ + g_return_val_if_fail (base_video_encoder_class->set_format != NULL, FALSE); + + GST_DEBUG_OBJECT (base_video_encoder, "setcaps %" GST_PTR_FORMAT, caps); + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder); state = &GST_BASE_VIDEO_CODEC (base_video_encoder)->state; + memset (&tmp_state, 0, sizeof (tmp_state)); + + tmp_state.caps = gst_caps_ref (caps); structure = gst_caps_get_structure (caps, 0); - gst_video_format_parse_caps (caps, &state->format, - &state->width, &state->height); + ret = + gst_video_format_parse_caps (caps, &tmp_state.format, &tmp_state.width, + &tmp_state.height); + if (!ret) + goto exit; - state->fps_n = 0; - state->fps_d = 1; - gst_video_parse_caps_framerate (caps, &state->fps_n, &state->fps_d); - if (state->fps_d == 0) { - state->fps_n = 0; - state->fps_d = 1; - } + changed = (tmp_state.format != state->format + || tmp_state.width != state->width || tmp_state.height != state->height); - state->par_n = 1; - state->par_d = 1; - gst_video_parse_caps_pixel_aspect_ratio (caps, &state->par_n, &state->par_d); + if (!gst_video_parse_caps_framerate (caps, &tmp_state.fps_n, + &tmp_state.fps_d)) { + tmp_state.fps_n = 0; + tmp_state.fps_d = 1; + } + changed = changed || (tmp_state.fps_n != state->fps_n + || tmp_state.fps_d != state->fps_d); - 
state->have_interlaced = gst_structure_get_boolean (structure, - "interlaced", &state->interlaced); + if (!gst_video_parse_caps_pixel_aspect_ratio (caps, &tmp_state.par_n, + &tmp_state.par_d)) { + tmp_state.par_n = 1; + tmp_state.par_d = 1; + } + changed = changed || (tmp_state.par_n != state->par_n + || tmp_state.par_d != state->par_d); + + tmp_state.have_interlaced = + gst_structure_get_boolean (structure, "interlaced", + &tmp_state.interlaced); + changed = changed || (tmp_state.have_interlaced != state->have_interlaced + || tmp_state.interlaced != state->interlaced); + + tmp_state.bytes_per_picture = + gst_video_format_get_size (tmp_state.format, tmp_state.width, + tmp_state.height); + tmp_state.clean_width = tmp_state.width; + tmp_state.clean_height = tmp_state.height; + tmp_state.clean_offset_left = 0; + tmp_state.clean_offset_top = 0; + + if (changed) { + /* arrange draining pending frames */ + gst_base_video_encoder_drain (base_video_encoder); + + /* and subclass should be ready to configure format at any time around */ + if (base_video_encoder_class->set_format) + ret = + base_video_encoder_class->set_format (base_video_encoder, &tmp_state); + if (ret) { + gst_caps_replace (&state->caps, NULL); + *state = tmp_state; + } + } else { + /* no need to stir things up */ + GST_DEBUG_OBJECT (base_video_encoder, + "new video format identical to configured format"); + gst_caps_unref (tmp_state.caps); + ret = TRUE; + } - state->clean_width = state->width; - state->clean_height = state->height; - state->clean_offset_left = 0; - state->clean_offset_top = 0; +exit: + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); - ret = base_video_encoder_class->set_format (base_video_encoder, - &GST_BASE_VIDEO_CODEC (base_video_encoder)->state); - if (ret) { - ret = base_video_encoder_class->start (base_video_encoder); + if (!ret) { + GST_WARNING_OBJECT (base_video_encoder, "rejected caps %" GST_PTR_FORMAT, + caps); } g_object_unref (base_video_encoder); @@ -148,6 +367,75 @@ 
gst_base_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps) return ret; } +static GstCaps * +gst_base_video_encoder_sink_getcaps (GstPad * pad) +{ + GstBaseVideoEncoder *base_video_encoder; + const GstCaps *templ_caps; + GstCaps *allowed; + GstCaps *fcaps, *filter_caps; + gint i, j; + + base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); + + /* FIXME: Allow subclass to override this? */ + + /* Allow downstream to specify width/height/framerate/PAR constraints + * and forward them upstream for video converters to handle + */ + templ_caps = + gst_pad_get_pad_template_caps (GST_BASE_VIDEO_CODEC_SINK_PAD + (base_video_encoder)); + allowed = + gst_pad_get_allowed_caps (GST_BASE_VIDEO_CODEC_SRC_PAD + (base_video_encoder)); + if (!allowed || gst_caps_is_empty (allowed) || gst_caps_is_any (allowed)) { + fcaps = gst_caps_copy (templ_caps); + goto done; + } + + GST_LOG_OBJECT (base_video_encoder, "template caps %" GST_PTR_FORMAT, + templ_caps); + GST_LOG_OBJECT (base_video_encoder, "allowed caps %" GST_PTR_FORMAT, allowed); + + filter_caps = gst_caps_new_empty (); + + for (i = 0; i < gst_caps_get_size (templ_caps); i++) { + GQuark q_name = + gst_structure_get_name_id (gst_caps_get_structure (templ_caps, i)); + + for (j = 0; j < gst_caps_get_size (allowed); j++) { + const GstStructure *allowed_s = gst_caps_get_structure (allowed, j); + const GValue *val; + GstStructure *s; + + s = gst_structure_id_empty_new (q_name); + if ((val = gst_structure_get_value (allowed_s, "width"))) + gst_structure_set_value (s, "width", val); + if ((val = gst_structure_get_value (allowed_s, "height"))) + gst_structure_set_value (s, "height", val); + if ((val = gst_structure_get_value (allowed_s, "framerate"))) + gst_structure_set_value (s, "framerate", val); + if ((val = gst_structure_get_value (allowed_s, "pixel-aspect-ratio"))) + gst_structure_set_value (s, "pixel-aspect-ratio", val); + + gst_caps_merge_structure (filter_caps, s); + } + } + + fcaps = gst_caps_intersect 
(filter_caps, templ_caps); + gst_caps_unref (filter_caps); + +done: + + gst_caps_replace (&allowed, NULL); + + GST_LOG_OBJECT (base_video_encoder, "Returning caps %" GST_PTR_FORMAT, fcaps); + + g_object_unref (base_video_encoder); + return fcaps; +} + static void gst_base_video_encoder_finalize (GObject * object) { @@ -157,29 +445,33 @@ gst_base_video_encoder_finalize (GObject * object) } static gboolean -gst_base_video_encoder_sink_event (GstPad * pad, GstEvent * event) +gst_base_video_encoder_sink_eventfunc (GstBaseVideoEncoder * base_video_encoder, + GstEvent * event) { - GstBaseVideoEncoder *base_video_encoder; GstBaseVideoEncoderClass *base_video_encoder_class; gboolean ret = FALSE; - base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); base_video_encoder_class = GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_EOS: { + GstFlowReturn flow_ret; + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder); base_video_encoder->a.at_eos = TRUE; + if (base_video_encoder_class->finish) { - base_video_encoder_class->finish (base_video_encoder); + flow_ret = base_video_encoder_class->finish (base_video_encoder); + } else { + flow_ret = GST_FLOW_OK; } - ret = - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), - event); - } + ret = (flow_ret == GST_BASE_VIDEO_ENCODER_FLOW_DROPPED); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); break; + } case GST_EVENT_NEWSEGMENT: { gboolean update; @@ -190,25 +482,30 @@ gst_base_video_encoder_sink_event (GstPad * pad, GstEvent * event) gint64 stop; gint64 position; - gst_event_parse_new_segment_full (event, &update, &rate, - &applied_rate, &format, &start, &stop, &position); + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder); + gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate, + &format, &start, &stop, &position); - if (format != GST_FORMAT_TIME) - goto newseg_wrong_format; + GST_DEBUG_OBJECT 
(base_video_encoder, "newseg rate %g, applied rate %g, " + "format %d, start = %" GST_TIME_FORMAT ", stop = %" GST_TIME_FORMAT + ", pos = %" GST_TIME_FORMAT, rate, applied_rate, format, + GST_TIME_ARGS (start), GST_TIME_ARGS (stop), + GST_TIME_ARGS (position)); - GST_DEBUG ("new segment %" GST_TIME_FORMAT " %" GST_TIME_FORMAT, - GST_TIME_ARGS (start), GST_TIME_ARGS (position)); + if (format != GST_FORMAT_TIME) { + GST_DEBUG_OBJECT (base_video_encoder, "received non TIME newsegment"); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); + break; + } base_video_encoder->a.at_eos = FALSE; + gst_segment_set_newsegment_full (&GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, update, rate, applied_rate, format, start, stop, position); - - ret = - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), - event); - } + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); break; + } case GST_EVENT_CUSTOM_DOWNSTREAM: { const GstStructure *s; @@ -218,34 +515,70 @@ gst_base_video_encoder_sink_event (GstPad * pad, GstEvent * event) if (gst_structure_has_name (s, "GstForceKeyUnit")) { GST_OBJECT_LOCK (base_video_encoder); base_video_encoder->force_keyframe = TRUE; + if (base_video_encoder->force_keyunit_event) + gst_event_unref (base_video_encoder->force_keyunit_event); + base_video_encoder->force_keyunit_event = gst_event_copy (event); GST_OBJECT_UNLOCK (base_video_encoder); gst_event_unref (event); - ret = GST_FLOW_OK; - } else { - ret = - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD - (base_video_encoder), event); + ret = TRUE; } break; } default: - /* FIXME this changes the order of events */ - ret = - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), - event); break; } -done: - gst_object_unref (base_video_encoder); return ret; +} -newseg_wrong_format: - { - GST_DEBUG_OBJECT (base_video_encoder, "received non TIME newsegment"); - gst_event_unref (event); - goto done; +static gboolean +gst_base_video_encoder_sink_event 
(GstPad * pad, GstEvent * event) +{ + GstBaseVideoEncoder *enc; + GstBaseVideoEncoderClass *klass; + gboolean handled = FALSE; + gboolean ret = TRUE; + + enc = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); + klass = GST_BASE_VIDEO_ENCODER_GET_CLASS (enc); + + GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event), + GST_EVENT_TYPE_NAME (event)); + + if (klass->event) + handled = klass->event (enc, event); + + if (!handled) + handled = gst_base_video_encoder_sink_eventfunc (enc, event); + + if (!handled) { + /* Forward non-serialized events and EOS/FLUSH_STOP immediately. + * For EOS this is required because no buffer or serialized event + * will come after EOS and nothing could trigger another + * _finish_frame() call. * + * If the subclass handles sending of EOS manually it can return + * _DROPPED from ::finish() and all other subclasses should have + * decoded/flushed all remaining data before this + * + * For FLUSH_STOP this is required because it is expected + * to be forwarded immediately and no buffers are queued anyway. 
+ */ + if (!GST_EVENT_IS_SERIALIZED (event) + || GST_EVENT_TYPE (event) == GST_EVENT_EOS + || GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) { + ret = gst_pad_push_event (enc->base_video_codec.srcpad, event); + } else { + GST_BASE_VIDEO_CODEC_STREAM_LOCK (enc); + enc->current_frame_events = + g_list_prepend (enc->current_frame_events, event); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (enc); + } } + + GST_DEBUG_OBJECT (enc, "event handled"); + + gst_object_unref (enc); + return ret; } static gboolean @@ -256,6 +589,9 @@ gst_base_video_encoder_src_event (GstPad * pad, GstEvent * event) base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); + GST_LOG_OBJECT (base_video_encoder, "handling event: %" GST_PTR_FORMAT, + event); + switch (GST_EVENT_TYPE (event)) { case GST_EVENT_CUSTOM_UPSTREAM: { @@ -292,8 +628,6 @@ static const GstQueryType * gst_base_video_encoder_get_query_types (GstPad * pad) { static const GstQueryType query_types[] = { - //GST_QUERY_POSITION, - //GST_QUERY_DURATION, GST_QUERY_CONVERT, GST_QUERY_LATENCY, 0 @@ -312,17 +646,18 @@ gst_base_video_encoder_src_query (GstPad * pad, GstQuery * query) enc = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); peerpad = gst_pad_get_peer (GST_BASE_VIDEO_CODEC_SINK_PAD (enc)); - switch GST_QUERY_TYPE - (query) { + GST_LOG_OBJECT (enc, "handling query: %" GST_PTR_FORMAT, query); + + switch (GST_QUERY_TYPE (query)) { case GST_QUERY_CONVERT: { + GstBaseVideoCodec *codec = GST_BASE_VIDEO_CODEC (enc); GstFormat src_fmt, dest_fmt; gint64 src_val, dest_val; gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - res = - gst_base_video_encoded_video_convert (&GST_BASE_VIDEO_CODEC - (enc)->state, src_fmt, src_val, &dest_fmt, &dest_val); + res = gst_base_video_encoded_video_convert (&codec->state, + codec->bytes, codec->time, src_fmt, src_val, &dest_fmt, &dest_val); if (!res) goto error; gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); @@ -336,11 +671,16 @@ 
gst_base_video_encoder_src_query (GstPad * pad, GstQuery * query) res = gst_pad_query (peerpad, query); if (res) { gst_query_parse_latency (query, &live, &min_latency, &max_latency); + GST_DEBUG_OBJECT (enc, "Peer latency: live %d, min %" + GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live, + GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency)); + GST_OBJECT_LOCK (enc); min_latency += enc->min_latency; if (max_latency != GST_CLOCK_TIME_NONE) { max_latency += enc->max_latency; } + GST_OBJECT_UNLOCK (enc); gst_query_set_latency (query, live, min_latency, max_latency); } @@ -348,7 +688,7 @@ gst_base_video_encoder_src_query (GstPad * pad, GstQuery * query) break; default: res = gst_pad_query_default (pad, query); - } + } gst_object_unref (peerpad); gst_object_unref (enc); return res; @@ -360,38 +700,35 @@ error: return res; } -static gboolean -gst_pad_is_negotiated (GstPad * pad) -{ - GstCaps *caps; - - g_return_val_if_fail (pad != NULL, FALSE); - - caps = gst_pad_get_negotiated_caps (pad); - if (caps) { - gst_caps_unref (caps); - return TRUE; - } - - return FALSE; -} - static GstFlowReturn gst_base_video_encoder_chain (GstPad * pad, GstBuffer * buf) { GstBaseVideoEncoder *base_video_encoder; GstBaseVideoEncoderClass *klass; GstVideoFrame *frame; - - if (!gst_pad_is_negotiated (pad)) { - return GST_FLOW_NOT_NEGOTIATED; - } + GstFlowReturn ret = GST_FLOW_OK; base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); klass = GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder); + g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR); + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder); + + if (!GST_PAD_CAPS (pad)) { + ret = GST_FLOW_NOT_NEGOTIATED; + goto done; + } + + GST_LOG_OBJECT (base_video_encoder, + "received buffer of size %d with ts %" GST_TIME_FORMAT + ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf), + GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), + GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); + if 
(base_video_encoder->a.at_eos) { - return GST_FLOW_UNEXPECTED; + ret = GST_FLOW_UNEXPECTED; + goto done; } if (base_video_encoder->sink_clipping) { @@ -402,30 +739,48 @@ gst_base_video_encoder_chain (GstPad * pad, GstBuffer * buf) if (!gst_segment_clip (&GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, GST_FORMAT_TIME, start, stop, &clip_start, &clip_stop)) { - GST_DEBUG ("clipping to segment dropped frame"); + GST_DEBUG_OBJECT (base_video_encoder, + "clipping to segment dropped frame"); goto done; } } + if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { + GST_LOG_OBJECT (base_video_encoder, "marked discont"); + GST_BASE_VIDEO_CODEC (base_video_encoder)->discont = TRUE; + } + frame = gst_base_video_codec_new_frame (GST_BASE_VIDEO_CODEC (base_video_encoder)); + frame->events = base_video_encoder->current_frame_events; + base_video_encoder->current_frame_events = NULL; frame->sink_buffer = buf; frame->presentation_timestamp = GST_BUFFER_TIMESTAMP (buf); frame->presentation_duration = GST_BUFFER_DURATION (buf); frame->presentation_frame_number = base_video_encoder->presentation_frame_number; base_video_encoder->presentation_frame_number++; + frame->force_keyframe = base_video_encoder->force_keyframe; + base_video_encoder->force_keyframe = FALSE; GST_BASE_VIDEO_CODEC (base_video_encoder)->frames = g_list_append (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame); - klass->handle_frame (base_video_encoder, frame); + /* new data, more finish needed */ + base_video_encoder->drained = FALSE; + + GST_LOG_OBJECT (base_video_encoder, "passing frame pfn %d to subclass", + frame->presentation_frame_number); + + ret = klass->handle_frame (base_video_encoder, frame); done: + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); + g_object_unref (base_video_encoder); - return GST_FLOW_OK; + return ret; } static GstStateChangeReturn @@ -440,6 +795,14 @@ gst_base_video_encoder_change_state (GstElement * element, base_video_encoder_class = 
GST_BASE_VIDEO_ENCODER_GET_CLASS (element); switch (transition) { + case GST_STATE_CHANGE_READY_TO_PAUSED: + gst_base_video_encoder_reset (base_video_encoder); + gst_base_video_encoder_reset (base_video_encoder); + if (base_video_encoder_class->start) { + if (!base_video_encoder_class->start (base_video_encoder)) + goto start_error; + } + break; default: break; } @@ -448,8 +811,10 @@ gst_base_video_encoder_change_state (GstElement * element, switch (transition) { case GST_STATE_CHANGE_PAUSED_TO_READY: + gst_base_video_encoder_reset (base_video_encoder); if (base_video_encoder_class->stop) { - base_video_encoder_class->stop (base_video_encoder); + if (!base_video_encoder_class->stop (base_video_encoder)) + goto stop_error; } break; default: @@ -457,19 +822,106 @@ gst_base_video_encoder_change_state (GstElement * element, } return ret; + +start_error: + GST_WARNING_OBJECT (base_video_encoder, "failed to start"); + return GST_STATE_CHANGE_FAILURE; + +stop_error: + GST_WARNING_OBJECT (base_video_encoder, "failed to stop"); + return GST_STATE_CHANGE_FAILURE; } +/** + * gst_base_video_encoder_finish_frame: + * @base_video_encoder: a #GstBaseVideoEncoder + * @frame: an encoded #GstVideoFrame + * + * @frame must have a valid encoded data buffer, whose metadata fields + * are then appropriately set according to frame data or no buffer at + * all if the frame should be dropped. + * It is subsequently pushed downstream or provided to @shape_output. + * In any case, the frame is considered finished and released. 
+ * + * Returns: a #GstFlowReturn resulting from sending data downstream + */ GstFlowReturn gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder, GstVideoFrame * frame) { - GstFlowReturn ret; + GstFlowReturn ret = GST_FLOW_OK; GstBaseVideoEncoderClass *base_video_encoder_class; + GList *l; base_video_encoder_class = GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder); + GST_LOG_OBJECT (base_video_encoder, + "finish frame fpn %d", frame->presentation_frame_number); + + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder); + + /* Push all pending events that arrived before this frame */ + for (l = base_video_encoder->base_video_codec.frames; l; l = l->next) { + GstVideoFrame *tmp = l->data; + + if (tmp->events) { + GList *k; + + for (k = g_list_last (tmp->events); k; k = k->prev) + gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), + k->data); + g_list_free (tmp->events); + tmp->events = NULL; + } + + if (tmp == frame) + break; + } + + if (frame->force_keyframe) { + GstClockTime stream_time; + GstClockTime running_time; + GstEvent *ev; + + running_time = + gst_segment_to_running_time (&GST_BASE_VIDEO_CODEC + (base_video_encoder)->segment, GST_FORMAT_TIME, + frame->presentation_timestamp); + stream_time = + gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC + (base_video_encoder)->segment, GST_FORMAT_TIME, + frame->presentation_timestamp); + + /* re-use upstream event if any so it also conveys any additional + * info upstream arranged in there */ + GST_OBJECT_LOCK (base_video_encoder); + if (base_video_encoder->force_keyunit_event) { + ev = base_video_encoder->force_keyunit_event; + base_video_encoder->force_keyunit_event = NULL; + } else { + ev = gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, + gst_structure_new ("GstForceKeyUnit", NULL)); + } + GST_OBJECT_UNLOCK (base_video_encoder); + + gst_structure_set (ev->structure, + "timestamp", G_TYPE_UINT64, frame->presentation_timestamp, + "stream-time", G_TYPE_UINT64, 
stream_time, + "running-time", G_TYPE_UINT64, running_time, NULL); + + gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), ev); + } + + /* no buffer data means this frame is skipped/dropped */ + if (!frame->src_buffer) { + GST_DEBUG_OBJECT (base_video_encoder, "skipping frame %" GST_TIME_FORMAT, + GST_TIME_ARGS (frame->presentation_timestamp)); + goto done; + } + if (frame->is_sync_point) { + GST_LOG_OBJECT (base_video_encoder, "key frame"); base_video_encoder->distance_from_sync = 0; GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT); } else { @@ -492,49 +944,25 @@ gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder, GST_BUFFER_DURATION (frame->src_buffer) = frame->presentation_duration; GST_BUFFER_OFFSET (frame->src_buffer) = frame->decode_timestamp; - GST_BASE_VIDEO_CODEC (base_video_encoder)->frames = - g_list_remove (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame); + /* update rate estimate */ + GST_BASE_VIDEO_CODEC (base_video_encoder)->bytes += + GST_BUFFER_SIZE (frame->src_buffer); + if (GST_CLOCK_TIME_IS_VALID (frame->presentation_duration)) { + GST_BASE_VIDEO_CODEC (base_video_encoder)->time += + frame->presentation_duration; + } else { + /* better none than nothing valid */ + GST_BASE_VIDEO_CODEC (base_video_encoder)->time = GST_CLOCK_TIME_NONE; + } - if (!base_video_encoder->set_output_caps) { - if (base_video_encoder_class->get_caps) { - GST_BASE_VIDEO_CODEC (base_video_encoder)->caps = - base_video_encoder_class->get_caps (base_video_encoder); - } else { - GST_BASE_VIDEO_CODEC (base_video_encoder)->caps = - gst_caps_new_simple ("video/unknown", NULL); - } - gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), - GST_BASE_VIDEO_CODEC (base_video_encoder)->caps); - base_video_encoder->set_output_caps = TRUE; + if (G_UNLIKELY (GST_BASE_VIDEO_CODEC (base_video_encoder)->discont)) { + GST_LOG_OBJECT (base_video_encoder, "marking discont"); + GST_BUFFER_FLAG_SET 
(frame->src_buffer, GST_BUFFER_FLAG_DISCONT); + GST_BASE_VIDEO_CODEC (base_video_encoder)->discont = FALSE; } gst_buffer_set_caps (GST_BUFFER (frame->src_buffer), - GST_BASE_VIDEO_CODEC (base_video_encoder)->caps); - - if (frame->force_keyframe) { - GstClockTime stream_time; - GstClockTime running_time; - GstStructure *s; - - running_time = - gst_segment_to_running_time (&GST_BASE_VIDEO_CODEC - (base_video_encoder)->segment, GST_FORMAT_TIME, - frame->presentation_timestamp); - stream_time = - gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC - (base_video_encoder)->segment, GST_FORMAT_TIME, - frame->presentation_timestamp); - - /* FIXME this should send the event that we got on the sink pad - instead of creating a new one */ - s = gst_structure_new ("GstForceKeyUnit", - "timestamp", G_TYPE_UINT64, frame->presentation_timestamp, - "stream-time", G_TYPE_UINT64, stream_time, - "running-time", G_TYPE_UINT64, running_time, NULL); - - gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), - gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s)); - } + GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder))); if (base_video_encoder_class->shape_output) { ret = base_video_encoder_class->shape_output (base_video_encoder, frame); @@ -543,43 +971,40 @@ gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder, gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), frame->src_buffer); } + frame->src_buffer = NULL; + +done: + /* handed out */ + GST_BASE_VIDEO_CODEC (base_video_encoder)->frames = + g_list_remove (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame); gst_base_video_codec_free_frame (frame); - return ret; -} + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); -int -gst_base_video_encoder_get_height (GstBaseVideoEncoder * base_video_encoder) -{ - return GST_BASE_VIDEO_CODEC (base_video_encoder)->state.height; -} - -int -gst_base_video_encoder_get_width (GstBaseVideoEncoder * base_video_encoder) -{ - 
return GST_BASE_VIDEO_CODEC (base_video_encoder)->state.width; + return ret; } +/** + * gst_base_video_encoder_get_state: + * @base_video_encoder: a #GstBaseVideoEncoder + * + * Returns: #GstVideoState describing format of video data. + */ const GstVideoState * gst_base_video_encoder_get_state (GstBaseVideoEncoder * base_video_encoder) { return &GST_BASE_VIDEO_CODEC (base_video_encoder)->state; } -GstFlowReturn -gst_base_video_encoder_end_of_stream (GstBaseVideoEncoder * base_video_encoder, - GstBuffer * buffer) -{ - - if (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames) { - GST_WARNING ("EOS with frames left over"); - } - - return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), - buffer); -} - +/** + * gst_base_video_encoder_set_latency: + * @base_video_encoder: a #GstBaseVideoEncoder + * @min_latency: minimum latency + * @max_latency: maximum latency + * + * Informs baseclass of encoding latency. + */ void gst_base_video_encoder_set_latency (GstBaseVideoEncoder * base_video_encoder, GstClockTime min_latency, GstClockTime max_latency) @@ -587,19 +1012,33 @@ gst_base_video_encoder_set_latency (GstBaseVideoEncoder * base_video_encoder, g_return_if_fail (min_latency >= 0); g_return_if_fail (max_latency >= min_latency); + GST_OBJECT_LOCK (base_video_encoder); base_video_encoder->min_latency = min_latency; base_video_encoder->max_latency = max_latency; + GST_OBJECT_UNLOCK (base_video_encoder); gst_element_post_message (GST_ELEMENT_CAST (base_video_encoder), gst_message_new_latency (GST_OBJECT_CAST (base_video_encoder))); } +/** + * gst_base_video_encoder_set_latency_fields: + * @base_video_encoder: a #GstBaseVideoEncoder + * @fields: latency in fields + * + * Informs baseclass of encoding latency in terms of fields (both min + * and max latency). 
+ */ void gst_base_video_encoder_set_latency_fields (GstBaseVideoEncoder * base_video_encoder, int n_fields) { gint64 latency; + /* 0 numerator is used for "don't know" */ + if (GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_n == 0) + return; + latency = gst_util_uint64_scale (n_fields, GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_d * GST_SECOND, 2 * GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_n); @@ -608,15 +1047,26 @@ gst_base_video_encoder_set_latency_fields (GstBaseVideoEncoder * } +/** + * gst_base_video_encoder_get_oldest_frame: + * @base_video_encoder: a #GstBaseVideoEncoder + * + * Returns: oldest unfinished pending #GstVideoFrame + */ GstVideoFrame * gst_base_video_encoder_get_oldest_frame (GstBaseVideoEncoder * base_video_encoder) { GList *g; + GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder); g = g_list_first (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames); + GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); if (g == NULL) return NULL; return (GstVideoFrame *) (g->data); } + +/* FIXME there could probably be more of these; + * get by presentation_number, by presentation_time ? */ diff --git a/gst-libs/gst/video/gstbasevideoencoder.h b/gst-libs/gst/video/gstbasevideoencoder.h index 228c517..c712fe8 100644 --- a/gst-libs/gst/video/gstbasevideoencoder.h +++ b/gst-libs/gst/video/gstbasevideoencoder.h @@ -1,5 +1,8 @@ /* GStreamer * Copyright (C) 2008 David Schleef <ds@schleef.org> + * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>. + * Copyright (C) 2011 Nokia Corporation. All rights reserved. + * Contact: Stefan Kost <stefan.kost@nokia.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public @@ -55,72 +58,126 @@ G_BEGIN_DECLS */ #define GST_BASE_VIDEO_ENCODER_SRC_NAME "src" +/** + * GST_BASE_VIDEO_ENCODER_FLOW_DROPPED: + * + * Returned when the event/buffer should be dropped. 
+ */ +#define GST_BASE_VIDEO_ENCODER_FLOW_DROPPED GST_FLOW_CUSTOM_SUCCESS_1 typedef struct _GstBaseVideoEncoder GstBaseVideoEncoder; typedef struct _GstBaseVideoEncoderClass GstBaseVideoEncoderClass; +/** + * GstBaseVideoEncoder: + * @element: the parent element. + * + * The opaque #GstBaseVideoEncoder data structure. + */ struct _GstBaseVideoEncoder { GstBaseVideoCodec base_video_codec; - /*< private >*/ - gboolean sink_clipping; + /*< protected >*/ + gboolean sink_clipping; + + guint64 presentation_frame_number; + int distance_from_sync; + + gboolean force_keyframe; - guint64 presentation_frame_number; - int distance_from_sync; + /*< private >*/ + /* FIXME move to real private part ? + * (and introduce a context ?) */ + gboolean drained; - gboolean set_output_caps; + gint64 min_latency; + gint64 max_latency; - gint64 min_latency; - gint64 max_latency; + GstEvent *force_keyunit_event; + GList *current_frame_events; - gboolean force_keyframe; union { void *padding; gboolean at_eos; } a; /* FIXME before moving to base */ - void *padding[GST_PADDING_LARGE-1]; + void *padding[GST_PADDING_LARGE-1]; }; +/** + * GstBaseVideoEncoderClass: + * @start: Optional. + * Called when the element starts processing. + * Allows opening external resources. + * @stop: Optional. + * Called when the element stops processing. + * Allows closing external resources. + * @set_format: Optional. + * Notifies subclass of incoming data format. + * GstVideoState fields have already been + * set according to provided caps. + * @handle_frame: Provides input frame to subclass. + * @finish: Optional. + * Called to request subclass to dispatch any pending remaining + * data (e.g. at EOS). + * @shape_output: Optional. + * Allows subclass to push frame downstream in whatever + * shape or form it deems appropriate. If not provided, + * provided encoded frame data is simply pushed downstream. + * @event: Optional. + * Event handler on the sink pad. 
This function should return + * TRUE if the event was handled and should be discarded + * (i.e. not unref'ed). + * + * Subclasses can override any of the available virtual methods or not, as + * needed. At minimum @handle_frame needs to be overridden, and @set_format + * and @get_caps are likely needed as well. + */ struct _GstBaseVideoEncoderClass { - GstBaseVideoCodecClass base_video_codec_class; + GstBaseVideoCodecClass base_video_codec_class; - gboolean (*set_format) (GstBaseVideoEncoder *coder, GstVideoState *state); - gboolean (*start) (GstBaseVideoEncoder *coder); - gboolean (*stop) (GstBaseVideoEncoder *coder); - gboolean (*finish) (GstBaseVideoEncoder *coder); - gboolean (*handle_frame) (GstBaseVideoEncoder *coder, GstVideoFrame *frame); - GstFlowReturn (*shape_output) (GstBaseVideoEncoder *coder, GstVideoFrame *frame); - GstCaps *(*get_caps) (GstBaseVideoEncoder *coder); + /*< public >*/ + /* virtual methods for subclasses */ - /* FIXME before moving to base */ - void *padding[GST_PADDING_LARGE]; -}; + gboolean (*start) (GstBaseVideoEncoder *coder); + + gboolean (*stop) (GstBaseVideoEncoder *coder); + + gboolean (*set_format) (GstBaseVideoEncoder *coder, + GstVideoState *state); + + GstFlowReturn (*handle_frame) (GstBaseVideoEncoder *coder, + GstVideoFrame *frame); -GType gst_base_video_encoder_get_type (void); + gboolean (*reset) (GstBaseVideoEncoder *coder); + GstFlowReturn (*finish) (GstBaseVideoEncoder *coder); -int gst_base_video_encoder_get_width (GstBaseVideoEncoder *coder); -int gst_base_video_encoder_get_height (GstBaseVideoEncoder *coder); -const GstVideoState *gst_base_video_encoder_get_state (GstBaseVideoEncoder *coder); + GstFlowReturn (*shape_output) (GstBaseVideoEncoder *coder, + GstVideoFrame *frame); + + gboolean (*event) (GstBaseVideoEncoder *coder, + GstEvent *event); + + /*< private >*/ + /* FIXME before moving to base */ + gpointer _gst_reserved[GST_PADDING_LARGE]; +}; -guint64 gst_base_video_encoder_get_timestamp_offset 
(GstBaseVideoEncoder *coder); +GType gst_base_video_encoder_get_type (void); -GstVideoFrame *gst_base_video_encoder_get_frame (GstBaseVideoEncoder *coder, - int frame_number); -GstVideoFrame *gst_base_video_encoder_get_oldest_frame (GstBaseVideoEncoder *coder); -GstFlowReturn gst_base_video_encoder_finish_frame (GstBaseVideoEncoder *base_video_encoder, - GstVideoFrame *frame); -GstFlowReturn gst_base_video_encoder_end_of_stream (GstBaseVideoEncoder *base_video_encoder, - GstBuffer *buffer); +const GstVideoState* gst_base_video_encoder_get_state (GstBaseVideoEncoder *coder); -void gst_base_video_encoder_set_latency (GstBaseVideoEncoder *base_video_encoder, - GstClockTime min_latency, GstClockTime max_latency); -void gst_base_video_encoder_set_latency_fields (GstBaseVideoEncoder *base_video_encoder, - int n_fields); +GstVideoFrame* gst_base_video_encoder_get_oldest_frame (GstBaseVideoEncoder *coder); +GstFlowReturn gst_base_video_encoder_finish_frame (GstBaseVideoEncoder *base_video_encoder, + GstVideoFrame *frame); +void gst_base_video_encoder_set_latency (GstBaseVideoEncoder *base_video_encoder, + GstClockTime min_latency, GstClockTime max_latency); +void gst_base_video_encoder_set_latency_fields (GstBaseVideoEncoder *base_video_encoder, + int n_fields); G_END_DECLS diff --git a/gst-libs/gst/video/gstbasevideoutils.c b/gst-libs/gst/video/gstbasevideoutils.c index d706394..507ad07 100644 --- a/gst-libs/gst/video/gstbasevideoutils.c +++ b/gst-libs/gst/video/gstbasevideoutils.c @@ -21,7 +21,7 @@ #include "config.h" #endif -#include "gstbasevideocodec.h" +#include "gstbasevideoutils.h" #include <string.h> @@ -29,21 +29,6 @@ GST_DEBUG_CATEGORY_EXTERN (basevideocodec_debug); #define GST_CAT_DEFAULT basevideocodec_debug -#if 0 -guint64 -gst_base_video_convert_bytes_to_frames (GstVideoState * state, guint64 bytes) -{ - return gst_util_uint64_scale_int (bytes, 1, state->bytes_per_picture); -} - -guint64 -gst_base_video_convert_frames_to_bytes (GstVideoState * state, guint64 
frames) -{ - return frames * state->bytes_per_picture; -} -#endif - - gboolean gst_base_video_rawvideo_convert (GstVideoState * state, GstFormat src_format, gint64 src_value, @@ -51,7 +36,10 @@ gst_base_video_rawvideo_convert (GstVideoState * state, { gboolean res = FALSE; - if (src_format == *dest_format) { + g_return_val_if_fail (dest_format != NULL, FALSE); + g_return_val_if_fail (dest_value != NULL, FALSE); + + if (src_format == *dest_format || src_value == 0 || src_value == -1) { *dest_value = src_value; return TRUE; } @@ -81,43 +69,77 @@ gst_base_video_rawvideo_convert (GstVideoState * state, *dest_value = gst_util_uint64_scale (src_value, state->fps_n, GST_SECOND * state->fps_d); res = TRUE; + } else if (src_format == GST_FORMAT_TIME && + *dest_format == GST_FORMAT_BYTES && state->fps_d != 0 && + state->bytes_per_picture != 0) { + /* convert time to frames */ + /* FIXME subtract segment time? */ + *dest_value = gst_util_uint64_scale (src_value, + state->fps_n * state->bytes_per_picture, GST_SECOND * state->fps_d); + res = TRUE; + } else if (src_format == GST_FORMAT_BYTES && + *dest_format == GST_FORMAT_TIME && state->fps_n != 0 && + state->bytes_per_picture != 0) { + /* convert frames to time */ + /* FIXME add segment time? 
*/ + *dest_value = gst_util_uint64_scale (src_value, + GST_SECOND * state->fps_d, state->fps_n * state->bytes_per_picture); + res = TRUE; } - /* FIXME add bytes <--> time */ - return res; } gboolean gst_base_video_encoded_video_convert (GstVideoState * state, - GstFormat src_format, gint64 src_value, - GstFormat * dest_format, gint64 * dest_value) + gint64 bytes, gint64 time, GstFormat src_format, + gint64 src_value, GstFormat * dest_format, gint64 * dest_value) { gboolean res = FALSE; - if (src_format == *dest_format) { - *dest_value = src_value; + g_return_val_if_fail (dest_format != NULL, FALSE); + g_return_val_if_fail (dest_value != NULL, FALSE); + + if (G_UNLIKELY (src_format == *dest_format || src_value == 0 || + src_value == -1)) { + if (dest_value) + *dest_value = src_value; return TRUE; } - GST_DEBUG ("src convert"); + if (bytes <= 0 || time <= 0) { + GST_DEBUG ("not enough metadata yet to convert"); + goto exit; + } -#if 0 - if (src_format == GST_FORMAT_DEFAULT && *dest_format == GST_FORMAT_TIME) { - if (dec->fps_d != 0) { - *dest_value = gst_util_uint64_scale (granulepos_to_frame (src_value), - dec->fps_d * GST_SECOND, dec->fps_n); - res = TRUE; - } else { + switch (src_format) { + case GST_FORMAT_BYTES: + switch (*dest_format) { + case GST_FORMAT_TIME: + *dest_value = gst_util_uint64_scale (src_value, time, bytes); + res = TRUE; + break; + default: + res = FALSE; + } + break; + case GST_FORMAT_TIME: + switch (*dest_format) { + case GST_FORMAT_BYTES: + *dest_value = gst_util_uint64_scale (src_value, bytes, time); + res = TRUE; + break; + default: + res = FALSE; + } + break; + default: + GST_DEBUG ("unhandled conversion from %d to %d", src_format, + *dest_format); res = FALSE; - } - } else { - GST_WARNING ("unhandled conversion from %d to %d", src_format, - *dest_format); - res = FALSE; } -#endif +exit: return res; } diff --git a/gst-libs/gst/video/gstbasevideoutils.h b/gst-libs/gst/video/gstbasevideoutils.h new file mode 100644 index 0000000..aeca2d1 --- 
/dev/null +++ b/gst-libs/gst/video/gstbasevideoutils.h @@ -0,0 +1,46 @@ +/* GStreamer + * Copyright (C) 2008 David Schleef <ds@schleef.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef _GST_BASE_VIDEO_UTILS_H_ +#define _GST_BASE_VIDEO_UTILS_H_ + +#ifndef GST_USE_UNSTABLE_API +#warning "GstBaseVideoCodec is unstable API and may change in future." +#warning "You can define GST_USE_UNSTABLE_API to avoid this warning." 
+#endif + +#include <gst/gst.h> +#include <gst/video/video.h> +#include "gstbasevideocodec.h" + +G_BEGIN_DECLS + +gboolean gst_base_video_rawvideo_convert (GstVideoState *state, + GstFormat src_format, gint64 src_value, + GstFormat * dest_format, gint64 *dest_value); +gboolean gst_base_video_encoded_video_convert (GstVideoState * state, + gint64 bytes, gint64 time, GstFormat src_format, + gint64 src_value, GstFormat * dest_format, gint64 * dest_value); + +GstClockTime gst_video_state_get_timestamp (const GstVideoState *state, + GstSegment *segment, int frame_number); + +G_END_DECLS + +#endif diff --git a/gst/camerabin2/Makefile.am b/gst/camerabin2/Makefile.am index 400641c..89686a6 100644 --- a/gst/camerabin2/Makefile.am +++ b/gst/camerabin2/Makefile.am @@ -1,9 +1,9 @@ plugin_LTLIBRARIES = libgstcamerabin2.la libgstcamerabin2_la_SOURCES = gstviewfinderbin.c \ - gstimagecapturebin.c \ camerabingeneral.c \ gstwrappercamerabinsrc.c \ + gstomxcamerabinsrc.c \ gstcamerabin2.c \ gstplugin.c @@ -23,9 +23,9 @@ libgstcamerabin2_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS) libgstcamerabin2_la_LIBTOOLFLAGS = --tag=disable-static noinst_HEADERS = gstviewfinderbin.h \ - gstimagecapturebin.h \ camerabingeneral.h \ gstwrappercamerabinsrc.h \ + gstomxcamerabinsrc.h \ gstcamerabin2.h Android.mk: Makefile.am $(BUILT_SOURCES) diff --git a/gst/camerabin2/gstcamerabin2.c b/gst/camerabin2/gstcamerabin2.c index 315a37e..82961d9 100644 --- a/gst/camerabin2/gstcamerabin2.c +++ b/gst/camerabin2/gstcamerabin2.c @@ -110,8 +110,7 @@ enum PROP_AUDIO_CAPTURE_CAPS, PROP_ZOOM, PROP_MAX_ZOOM, - PROP_IMAGE_CAPTURE_ENCODER, - PROP_IMAGE_CAPTURE_MUXER, + PROP_IMAGE_ENCODING_PROFILE, PROP_IDLE }; @@ -147,6 +146,8 @@ static void gst_camera_bin_dispose (GObject * object); static void gst_camera_bin_finalize (GObject * object); static void gst_camera_bin_handle_message (GstBin * bin, GstMessage * message); +static gboolean gst_camera_bin_send_event (GstElement * element, + GstEvent * event); GType 
gst_camera_bin_get_type (void) @@ -211,6 +212,11 @@ gst_camera_bin_start_capture (GstCameraBin * camerabin) GST_DEBUG_OBJECT (camerabin, "Received start-capture"); GST_CAMERA_BIN_PROCESSING_INC (camerabin); + if (camerabin->mode == MODE_IMAGE) { + g_timer_start (camerabin->shot_to_save_timer); + + } + taglist = gst_tag_setter_get_tag_list (GST_TAG_SETTER (camerabin)); if (taglist) { GstPad *active_pad; @@ -232,17 +238,26 @@ gst_camera_bin_start_capture (GstCameraBin * camerabin) } if (camerabin->mode == MODE_VIDEO && camerabin->audio_src) { + GstClock *clock = gst_pipeline_get_clock (GST_PIPELINE_CAST (camerabin)); + + camerabin->audio_send_newseg = TRUE; gst_element_set_state (camerabin->audio_src, GST_STATE_READY); /* need to reset eos status (pads could be flushing) */ gst_element_set_state (camerabin->audio_queue, GST_STATE_READY); - gst_element_set_state (camerabin->audio_convert, GST_STATE_READY); gst_element_set_state (camerabin->audio_capsfilter, GST_STATE_READY); gst_element_set_state (camerabin->audio_volume, GST_STATE_READY); gst_element_sync_state_with_parent (camerabin->audio_queue); - gst_element_sync_state_with_parent (camerabin->audio_convert); gst_element_sync_state_with_parent (camerabin->audio_capsfilter); gst_element_sync_state_with_parent (camerabin->audio_volume); + gst_element_set_state (camerabin->audio_src, GST_STATE_PAUSED); + + gst_element_set_base_time (camerabin->audio_src, + gst_element_get_base_time (GST_ELEMENT_CAST (camerabin))); + if (clock) { + gst_element_set_clock (camerabin->audio_src, clock); + gst_object_unref (clock); + } } g_signal_emit_by_name (camerabin->src, "start-capture", NULL); @@ -258,6 +273,7 @@ gst_camera_bin_stop_capture (GstCameraBin * camerabin) g_signal_emit_by_name (camerabin->src, "stop-capture", NULL); if (camerabin->mode == MODE_VIDEO && camerabin->audio_src) { + camerabin->audio_drop_eos = FALSE; gst_element_send_event (camerabin->audio_src, gst_event_new_eos ()); } } @@ -284,27 +300,40 @@ 
gst_camera_bin_src_notify_readyforcapture (GObject * obj, GParamSpec * pspec, GstCameraBin *camera = GST_CAMERA_BIN_CAST (user_data); gboolean ready; - if (camera->mode == MODE_VIDEO) { - g_object_get (camera->src, "ready-for-capture", &ready, NULL); - if (!ready) { - gchar *location; + g_object_get (camera->src, "ready-for-capture", &ready, NULL); + if (!ready) { + gchar *location = NULL; + if (camera->mode == MODE_VIDEO) { /* a video recording is about to start, we reset the videobin to clear eos/flushing state * also need to clean the queue ! capsfilter before it */ - gst_element_set_state (camera->encodebin, GST_STATE_NULL); gst_element_set_state (camera->videosink, GST_STATE_NULL); - gst_element_set_state (camera->videobin_queue, GST_STATE_NULL); + gst_element_set_state (camera->video_encodebin, GST_STATE_NULL); gst_element_set_state (camera->videobin_capsfilter, GST_STATE_NULL); + gst_element_set_state (camera->videobin_queue, GST_STATE_NULL); location = g_strdup_printf (camera->video_location, camera->video_index++); GST_DEBUG_OBJECT (camera, "Switching videobin location to %s", location); g_object_set (camera->videosink, "location", location, NULL); g_free (location); - gst_element_set_state (camera->encodebin, GST_STATE_PLAYING); gst_element_set_state (camera->videosink, GST_STATE_PLAYING); + gst_element_set_state (camera->video_encodebin, GST_STATE_PLAYING); gst_element_set_state (camera->videobin_capsfilter, GST_STATE_PLAYING); gst_element_set_state (camera->videobin_queue, GST_STATE_PLAYING); + } else if (camera->mode == MODE_IMAGE) { + gst_element_set_state (camera->image_encodebin, GST_STATE_NULL); + gst_element_set_state (camera->imagesink, GST_STATE_NULL); + gst_element_set_state (camera->imagebin_queue, GST_STATE_NULL); + gst_element_set_state (camera->imagebin_capsfilter, GST_STATE_NULL); + GST_DEBUG_OBJECT (camera, "Switching imagebin location to %s", location); + g_object_set (camera->imagesink, "location", camera->image_location, + NULL); + 
gst_element_set_state (camera->image_encodebin, GST_STATE_PLAYING); + gst_element_set_state (camera->imagesink, GST_STATE_PLAYING); + gst_element_set_state (camera->imagebin_capsfilter, GST_STATE_PLAYING); + gst_element_set_state (camera->imagebin_queue, GST_STATE_PLAYING); } + } } @@ -333,8 +362,6 @@ gst_camera_bin_dispose (GObject * object) gst_object_unref (camerabin->audio_capsfilter); if (camerabin->audio_queue) gst_object_unref (camerabin->audio_queue); - if (camerabin->audio_convert) - gst_object_unref (camerabin->audio_convert); if (camerabin->audio_volume) gst_object_unref (camerabin->audio_volume); @@ -345,9 +372,9 @@ gst_camera_bin_dispose (GObject * object) if (camerabin->viewfinderbin_capsfilter) gst_object_unref (camerabin->viewfinderbin_capsfilter); - if (camerabin->encodebin_signal_id) - g_signal_handler_disconnect (camerabin->encodebin, - camerabin->encodebin_signal_id); + if (camerabin->video_encodebin_signal_id) + g_signal_handler_disconnect (camerabin->video_encodebin, + camerabin->video_encodebin_signal_id); if (camerabin->videosink_probe) { GstPad *pad = gst_element_get_static_pad (camerabin->videosink, "sink"); @@ -357,15 +384,20 @@ gst_camera_bin_dispose (GObject * object) if (camerabin->videosink) gst_object_unref (camerabin->videosink); - if (camerabin->encodebin) - gst_object_unref (camerabin->encodebin); + if (camerabin->video_encodebin) + gst_object_unref (camerabin->video_encodebin); if (camerabin->videobin_queue) gst_object_unref (camerabin->videobin_queue); if (camerabin->videobin_capsfilter) gst_object_unref (camerabin->videobin_capsfilter); - if (camerabin->imagebin) - gst_object_unref (camerabin->imagebin); + if (camerabin->image_encodebin_signal_id) + g_signal_handler_disconnect (camerabin->image_encodebin, + camerabin->image_encodebin_signal_id); + if (camerabin->imagesink) + gst_object_unref (camerabin->imagesink); + if (camerabin->image_encodebin) + gst_object_unref (camerabin->image_encodebin); if (camerabin->imagebin_queue) 
gst_object_unref (camerabin->imagebin_queue); if (camerabin->imagebin_capsfilter) @@ -387,6 +419,8 @@ gst_camera_bin_dispose (GObject * object) if (camerabin->video_profile) gst_encoding_profile_unref (camerabin->video_profile); + if (camerabin->image_profile) + gst_encoding_profile_unref (camerabin->image_profile); if (camerabin->preview_caps) gst_caps_replace (&camerabin->preview_caps, NULL); @@ -395,6 +429,8 @@ gst_camera_bin_dispose (GObject * object) camerabin->preview_filter = NULL; } + g_timer_destroy (camerabin->shot_to_save_timer); + G_OBJECT_CLASS (parent_class)->dispose (object); } @@ -432,6 +468,7 @@ gst_camera_bin_class_init (GstCameraBinClass * klass) object_class->get_property = gst_camera_bin_get_property; element_class->change_state = GST_DEBUG_FUNCPTR (gst_camera_bin_change_state); + element_class->send_event = GST_DEBUG_FUNCPTR (gst_camera_bin_send_event); bin_class->handle_message = gst_camera_bin_handle_message; @@ -591,15 +628,11 @@ gst_camera_bin_class_init (GstCameraBinClass * klass) * it autoplugs a videorate that ony starts outputing buffers after * getting the 2nd buffer. 
*/ - g_object_class_install_property (object_class, PROP_IMAGE_CAPTURE_ENCODER, - g_param_spec_object ("image-capture-encoder", "Image capture encoder", - "The image encoder element to be used on image captures.", - GST_TYPE_ELEMENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - - g_object_class_install_property (object_class, PROP_IMAGE_CAPTURE_MUXER, - g_param_spec_object ("image-capture-muxer", "Image capture encoder", - "The image encoder element to be used on image captures.", - GST_TYPE_ELEMENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + g_object_class_install_property (object_class, PROP_IMAGE_ENCODING_PROFILE, + gst_param_spec_mini_object ("image-profile", "Image Profile", + "The GstEncodingProfile to use for image captures.", + GST_TYPE_ENCODING_PROFILE, + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (object_class, PROP_IDLE, g_param_spec_boolean ("idle", "Idle", @@ -654,7 +687,6 @@ gst_camera_bin_init (GstCameraBin * camera) camera->video_location = g_strdup (DEFAULT_VID_LOCATION); camera->image_location = g_strdup (DEFAULT_IMG_LOCATION); camera->viewfinderbin = gst_element_factory_make ("viewfinderbin", "vf-bin"); - camera->imagebin = gst_element_factory_make ("imagecapturebin", "imagebin"); camera->zoom = DEFAULT_ZOOM; camera->max_zoom = MAX_ZOOM; @@ -678,6 +710,8 @@ gst_camera_bin_init (GstCameraBin * camera) camera->audio_capsfilter = gst_element_factory_make ("capsfilter", "audio-capsfilter"); camera->audio_volume = gst_element_factory_make ("volume", "audio-volume"); + + camera->shot_to_save_timer = g_timer_new (); } static void @@ -688,6 +722,9 @@ gst_image_capture_bin_post_image_done (GstCameraBin * camera, g_return_if_fail (filename != NULL); + printf ("*** Shot-to-save delay: %lf\n", + g_timer_elapsed (camera->shot_to_save_timer, NULL)); + msg = gst_message_new_element (GST_OBJECT_CAST (camera), gst_structure_new ("image-done", "filename", G_TYPE_STRING, filename, NULL)); @@ -799,12 +836,12 @@ 
encodebin_element_added (GstElement * encodebin, GstElement * new_element, #define VIDEO_PAD 1 #define AUDIO_PAD 2 static GstPad * -encodebin_find_pad (GstCameraBin * camera, gint pad_type) +encodebin_find_pad (GstCameraBin * camera, GstElement * encodebin, + gint pad_type) { GstPad *pad = NULL; GstIterator *iter; gboolean done; - GstElement *encodebin = camera->encodebin; GST_DEBUG_OBJECT (camera, "Looking at encodebin pads, searching for %s pad", VIDEO_PAD ? "video" : "audio"); @@ -887,15 +924,15 @@ gst_camera_bin_video_profile_has_audio (GstCameraBin * camera) } static GstPadLinkReturn -gst_camera_bin_link_encodebin (GstCameraBin * camera, GstElement * element, - gint padtype) +gst_camera_bin_link_encodebin (GstCameraBin * camera, GstElement * encodebin, + GstElement * element, gint padtype) { GstPadLinkReturn ret; GstPad *srcpad; GstPad *sinkpad = NULL; srcpad = gst_element_get_static_pad (element, "src"); - sinkpad = encodebin_find_pad (camera, padtype); + sinkpad = encodebin_find_pad (camera, encodebin, padtype); g_assert (srcpad != NULL); g_assert (sinkpad != NULL); @@ -918,6 +955,53 @@ gst_camera_bin_src_notify_max_zoom_cb (GObject * self, GParamSpec * pspec, g_object_notify (G_OBJECT (camera), "max-zoom"); } +static gboolean +gst_camera_bin_audio_src_data_probe (GstPad * pad, GstMiniObject * obj, + gpointer data) +{ + GstCameraBin *camera = data; + gboolean ret = TRUE; + + if (GST_IS_BUFFER (obj)) { + if (G_UNLIKELY (camera->audio_send_newseg)) { + GstBuffer *buf = GST_BUFFER_CAST (obj); + GstClockTime ts = GST_BUFFER_TIMESTAMP (buf); + GstPad *peer; + + if (!GST_CLOCK_TIME_IS_VALID (ts)) { + ts = 0; + } + + peer = gst_pad_get_peer (pad); + g_return_val_if_fail (peer != NULL, TRUE); + + gst_pad_send_event (peer, gst_event_new_new_segment (FALSE, 1.0, + GST_FORMAT_TIME, ts, -1, 0)); + + gst_object_unref (peer); + + camera->audio_send_newseg = FALSE; + } + } else { + GstEvent *event = GST_EVENT_CAST (obj); + if (GST_EVENT_TYPE (event) == GST_EVENT_EOS) { + 
/* we only let an EOS pass when the user is stopping a capture */ + if (camera->audio_drop_eos) { + ret = FALSE; + } else { + camera->audio_drop_eos = TRUE; + /* should already be false, but reinforce in case no buffers get + * pushed */ + camera->audio_send_newseg = FALSE; + } + } else if (GST_EVENT_TYPE (event) == GST_EVENT_NEWSEGMENT) { + ret = FALSE; + } + } + + return ret; +} + /** * gst_camera_bin_create_elements: * @param camera: the #GstCameraBin @@ -942,9 +1026,10 @@ gst_camera_bin_create_elements (GstCameraBin * camera) /* TODO check that elements created in _init were really created */ /* TODO add proper missing plugin error handling */ - camera->encodebin = gst_element_factory_make ("encodebin", NULL); - camera->encodebin_signal_id = g_signal_connect (camera->encodebin, - "element-added", (GCallback) encodebin_element_added, camera); + camera->video_encodebin = gst_element_factory_make ("encodebin", NULL); + camera->video_encodebin_signal_id = + g_signal_connect (camera->video_encodebin, "element-added", + (GCallback) encodebin_element_added, camera); camera->videosink = gst_element_factory_make ("filesink", "videobin-filesink"); @@ -952,36 +1037,61 @@ gst_camera_bin_create_elements (GstCameraBin * camera) /* audio elements */ camera->audio_queue = gst_element_factory_make ("queue", "audio-queue"); - camera->audio_convert = gst_element_factory_make ("audioconvert", - "audio-convert"); if (camera->video_profile == NULL) { GstEncodingContainerProfile *prof; + GstEncodingVideoProfile *video_prof; GstCaps *caps; - caps = gst_caps_new_simple ("application/ogg", NULL); - prof = gst_encoding_container_profile_new ("ogg", "theora+vorbis+ogg", + caps = gst_caps_new_simple ("video/quicktime", NULL); + prof = gst_encoding_container_profile_new ("mp4", "h264+isomp4", caps, NULL); gst_caps_unref (caps); - caps = gst_caps_new_simple ("video/x-theora", NULL); + caps = gst_caps_new_simple ("video/x-h264", NULL); + video_prof = gst_encoding_video_profile_new (caps, 
NULL, NULL, 1); + gst_encoding_video_profile_set_variableframerate (video_prof, TRUE); if (!gst_encoding_container_profile_add_profile (prof, - (GstEncodingProfile *) gst_encoding_video_profile_new (caps, - NULL, NULL, 1))) { + (GstEncodingProfile *) video_prof)) { GST_WARNING_OBJECT (camera, "Failed to create encoding profiles"); } gst_caps_unref (caps); - caps = gst_caps_new_simple ("audio/x-vorbis", NULL); + caps = + gst_caps_new_simple ("audio/mpeg", "mpegversion", G_TYPE_INT, 4, + NULL); if (!gst_encoding_container_profile_add_profile (prof, - (GstEncodingProfile *) gst_encoding_audio_profile_new (caps, - NULL, NULL, 1))) { + (GstEncodingProfile *) gst_encoding_audio_profile_new (caps, NULL, + NULL, 1))) { GST_WARNING_OBJECT (camera, "Failed to create encoding profiles"); } gst_caps_unref (caps); camera->video_profile = (GstEncodingProfile *) prof; - camera->profile_switch = TRUE; + camera->video_profile_switch = TRUE; + } + + camera->image_encodebin = gst_element_factory_make ("encodebin", NULL); + camera->image_encodebin_signal_id = + g_signal_connect (camera->image_encodebin, "element-added", + (GCallback) encodebin_element_added, camera); + + camera->imagesink = + gst_element_factory_make ("multifilesink", "imagebin-filesink"); + g_object_set (camera->imagesink, "async", FALSE, "post-messages", TRUE, + NULL); + + if (camera->image_profile == NULL) { + GstEncodingVideoProfile *prof; + GstCaps *caps; + + caps = gst_caps_new_simple ("image/jpeg", NULL); + prof = gst_encoding_video_profile_new (caps, NULL, NULL, 1); + gst_encoding_video_profile_set_variableframerate (prof, TRUE); + gst_caps_unref (caps); + + camera->image_profile = (GstEncodingProfile *) prof; + camera->image_profile_switch = TRUE; } camera->videobin_queue = @@ -998,9 +1108,10 @@ gst_camera_bin_create_elements (GstCameraBin * camera) g_object_set (camera->videobin_queue, "silent", TRUE, NULL); gst_bin_add_many (GST_BIN_CAST (camera), - gst_object_ref (camera->encodebin), + gst_object_ref 
(camera->video_encodebin), gst_object_ref (camera->videosink), - gst_object_ref (camera->imagebin), + gst_object_ref (camera->image_encodebin), + gst_object_ref (camera->imagesink), gst_object_ref (camera->videobin_queue), gst_object_ref (camera->imagebin_queue), gst_object_ref (camera->viewfinderbin_queue), NULL); @@ -1008,10 +1119,11 @@ gst_camera_bin_create_elements (GstCameraBin * camera) /* Linking can be optimized TODO */ gst_element_link_many (camera->videobin_queue, camera->videobin_capsfilter, NULL); - gst_element_link (camera->encodebin, camera->videosink); + gst_element_link (camera->video_encodebin, camera->videosink); gst_element_link_many (camera->imagebin_queue, camera->imagebin_capsfilter, - camera->imagebin, NULL); + NULL); + gst_element_link (camera->image_encodebin, camera->imagesink); gst_element_link_many (camera->viewfinderbin_queue, camera->viewfinderbin_capsfilter, camera->viewfinderbin, NULL); /* @@ -1024,21 +1136,33 @@ gst_camera_bin_create_elements (GstCameraBin * camera) * starting recording, so we should prepare the video bin. 
*/ gst_element_set_locked_state (camera->videosink, TRUE); + gst_element_set_locked_state (camera->imagesink, TRUE); g_object_set (camera->videosink, "location", camera->video_location, NULL); - g_object_set (camera->imagebin, "location", camera->image_location, NULL); + g_object_set (camera->imagesink, "location", camera->image_location, NULL); } - if (camera->profile_switch) { + + if (camera->video_profile_switch) { GST_DEBUG_OBJECT (camera, "Switching encodebin's profile"); - g_object_set (camera->encodebin, "profile", camera->video_profile, NULL); - gst_camera_bin_link_encodebin (camera, camera->videobin_capsfilter, - VIDEO_PAD); - camera->profile_switch = FALSE; + g_object_set (camera->video_encodebin, "profile", camera->video_profile, + NULL); + gst_camera_bin_link_encodebin (camera, camera->video_encodebin, + camera->videobin_capsfilter, VIDEO_PAD); + camera->video_profile_switch = FALSE; /* used to trigger relinking further down */ profile_switched = TRUE; } + if (camera->image_profile_switch) { + GST_DEBUG_OBJECT (camera, "Switching encodebin's profile"); + g_object_set (camera->image_encodebin, "profile", camera->image_profile, + NULL); + gst_camera_bin_link_encodebin (camera, camera->image_encodebin, + camera->imagebin_capsfilter, VIDEO_PAD); + camera->image_profile_switch = FALSE; + } + /* check if we need to replace the camera src */ if (camera->src) { if (camera->user_src && camera->user_src != camera->src) { @@ -1057,8 +1181,7 @@ gst_camera_bin_create_elements (GstCameraBin * camera) if (camera->user_src) { camera->src = gst_object_ref (camera->user_src); } else { - camera->src = - gst_element_factory_make ("wrappercamerabinsrc", "camerasrc"); + camera->src = gst_element_factory_make ("omxcamerabinsrc", "camerasrc"); } new_src = TRUE; @@ -1091,8 +1214,8 @@ gst_camera_bin_create_elements (GstCameraBin * camera) } gst_camera_bin_check_and_replace_filter (camera, &camera->image_filter, - camera->user_image_filter, camera->imagebin_queue, - 
camera->imagebin_capsfilter); + camera->user_image_filter, camera->imagebin_capsfilter, + camera->image_encodebin); gst_camera_bin_check_and_replace_filter (camera, &camera->video_filter, camera->user_video_filter, camera->videobin_queue, camera->videobin_capsfilter); @@ -1109,7 +1232,6 @@ gst_camera_bin_create_elements (GstCameraBin * camera) gst_bin_remove (GST_BIN_CAST (camera), camera->audio_queue); gst_bin_remove (GST_BIN_CAST (camera), camera->audio_volume); gst_bin_remove (GST_BIN_CAST (camera), camera->audio_capsfilter); - gst_bin_remove (GST_BIN_CAST (camera), camera->audio_convert); gst_object_unref (camera->audio_src); camera->audio_src = NULL; } @@ -1128,20 +1250,35 @@ gst_camera_bin_create_elements (GstCameraBin * camera) } if (new_audio_src) { + GstPad *srcpad; + + if (g_object_class_find_property (G_OBJECT_GET_CLASS (camera->audio_src), + "provide-clock")) { + g_object_set (camera->audio_src, "provide-clock", FALSE, NULL); + } gst_bin_add (GST_BIN_CAST (camera), gst_object_ref (camera->audio_src)); gst_bin_add (GST_BIN_CAST (camera), gst_object_ref (camera->audio_queue)); gst_bin_add (GST_BIN_CAST (camera), gst_object_ref (camera->audio_volume)); gst_bin_add (GST_BIN_CAST (camera), gst_object_ref (camera->audio_capsfilter)); - gst_bin_add (GST_BIN_CAST (camera), gst_object_ref (camera->audio_convert)); gst_element_link_many (camera->audio_src, camera->audio_queue, - camera->audio_volume, - camera->audio_capsfilter, camera->audio_convert, NULL); + camera->audio_volume, camera->audio_capsfilter, NULL); + + srcpad = gst_element_get_static_pad (camera->audio_src, "src"); + + /* 1) drop EOS for audiosrc elements that push them on state_changes + * (basesrc does this) + * 2) Fix newsegment events to have start time = first buffer ts */ + gst_pad_add_data_probe (srcpad, + (GCallback) gst_camera_bin_audio_src_data_probe, camera); + + gst_object_unref (srcpad); } if ((profile_switched && has_audio) || new_audio_src) { - gst_camera_bin_link_encodebin (camera, 
camera->audio_convert, AUDIO_PAD); + gst_camera_bin_link_encodebin (camera, camera->video_encodebin, + camera->audio_capsfilter, AUDIO_PAD); } camera->elements_created = TRUE; @@ -1154,6 +1291,7 @@ gst_camera_bin_change_state (GstElement * element, GstStateChange trans) GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; GstCameraBin *camera = GST_CAMERA_BIN_CAST (element); + switch (trans) { case GST_STATE_CHANGE_NULL_TO_READY: if (!gst_camera_bin_create_elements (camera)) { @@ -1162,6 +1300,18 @@ gst_camera_bin_change_state (GstElement * element, GstStateChange trans) break; case GST_STATE_CHANGE_READY_TO_PAUSED: GST_CAMERA_BIN_RESET_PROCESSING_COUNTER (camera); + camera->audio_drop_eos = TRUE; + camera->audio_send_newseg = FALSE; + break; + case GST_STATE_CHANGE_PAUSED_TO_READY: + if (GST_STATE (camera->videosink) >= GST_STATE_PAUSED) + gst_element_set_state (camera->videosink, GST_STATE_READY); + if (GST_STATE (camera->imagesink) >= GST_STATE_PAUSED) + gst_element_set_state (camera->imagesink, GST_STATE_READY); + break; + case GST_STATE_CHANGE_READY_TO_NULL: + gst_element_set_state (camera->videosink, GST_STATE_NULL); + gst_element_set_state (camera->imagesink, GST_STATE_NULL); break; default: break; @@ -1171,8 +1321,6 @@ gst_camera_bin_change_state (GstElement * element, GstStateChange trans) switch (trans) { case GST_STATE_CHANGE_PAUSED_TO_READY: - if (GST_STATE (camera->videosink) >= GST_STATE_PAUSED) - gst_element_set_state (camera->videosink, GST_STATE_READY); if (camera->audio_src && GST_STATE (camera->audio_src) >= GST_STATE_READY) gst_element_set_state (camera->audio_src, GST_STATE_READY); @@ -1183,10 +1331,8 @@ gst_camera_bin_change_state (GstElement * element, GstStateChange trans) gst_element_set_state (camera->audio_queue, GST_STATE_READY); gst_element_set_state (camera->audio_volume, GST_STATE_READY); gst_element_set_state (camera->audio_capsfilter, GST_STATE_READY); - gst_element_set_state (camera->audio_convert, GST_STATE_READY); break; case 
GST_STATE_CHANGE_READY_TO_NULL: - gst_element_set_state (camera->videosink, GST_STATE_NULL); if (camera->audio_src) gst_element_set_state (camera->audio_src, GST_STATE_NULL); @@ -1194,7 +1340,6 @@ gst_camera_bin_change_state (GstElement * element, GstStateChange trans) gst_element_set_state (camera->audio_queue, GST_STATE_NULL); gst_element_set_state (camera->audio_volume, GST_STATE_NULL); gst_element_set_state (camera->audio_capsfilter, GST_STATE_NULL); - gst_element_set_state (camera->audio_convert, GST_STATE_NULL); break; default: @@ -1204,14 +1349,46 @@ gst_camera_bin_change_state (GstElement * element, GstStateChange trans) return ret; } +static gboolean +gst_camera_bin_send_event (GstElement * element, GstEvent * event) +{ + GstCameraBin *camera = GST_CAMERA_BIN_CAST (element); + gboolean res; + + res = GST_ELEMENT_CLASS (parent_class)->send_event (element, event); + switch (GST_EVENT_TYPE (event)) { + case GST_EVENT_EOS: + { + GstState current; + + if (camera->videosink) { + gst_element_get_state (camera->videosink, ¤t, NULL, 0); + if (current <= GST_STATE_READY) + gst_element_post_message (camera->videosink, + gst_message_new_eos (GST_OBJECT (camera->videosink))); + } + if (camera->imagesink) { + gst_element_get_state (camera->imagesink, ¤t, NULL, 0); + if (current <= GST_STATE_READY) + gst_element_post_message (camera->imagesink, + gst_message_new_eos (GST_OBJECT (camera->imagesink))); + } + break; + } + + default: + break; + } + + return res; +} + static void gst_camera_bin_set_location (GstCameraBin * camera, const gchar * location) { GST_DEBUG_OBJECT (camera, "Setting mode %d location to %s", camera->mode, location); if (camera->mode == MODE_IMAGE) { - if (camera->imagebin) - g_object_set (camera->imagebin, "location", location, NULL); g_free (camera->image_location); camera->image_location = g_strdup (location); } else { @@ -1370,7 +1547,7 @@ gst_camera_bin_set_property (GObject * object, guint prop_id, gst_encoding_profile_unref 
(camera->video_profile); camera->video_profile = (GstEncodingProfile *) gst_value_dup_mini_object (value); - camera->profile_switch = TRUE; + camera->video_profile_switch = TRUE; break; case PROP_IMAGE_FILTER: if (camera->user_image_filter) @@ -1416,13 +1593,12 @@ gst_camera_bin_set_property (GObject * object, guint prop_id, if (camera->src) g_object_set (camera->src, "zoom", camera->zoom, NULL); break; - case PROP_IMAGE_CAPTURE_ENCODER: - g_object_set (camera->imagebin, "image-encoder", - g_value_get_object (value), NULL); - break; - case PROP_IMAGE_CAPTURE_MUXER: - g_object_set (camera->imagebin, "image-muxer", - g_value_get_object (value), NULL); + case PROP_IMAGE_ENCODING_PROFILE: + if (camera->image_profile) + gst_encoding_profile_unref (camera->image_profile); + camera->image_profile = + (GstEncodingProfile *) gst_value_dup_mini_object (value); + camera->image_profile_switch = TRUE; break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); @@ -1576,20 +1752,12 @@ gst_camera_bin_get_property (GObject * object, guint prop_id, case PROP_MAX_ZOOM: g_value_set_float (value, camera->max_zoom); break; - case PROP_IMAGE_CAPTURE_ENCODER:{ - GstElement *enc; - - g_object_get (camera->imagebin, "image-encoder", &enc, NULL); - g_value_take_object (value, enc); - break; - } - case PROP_IMAGE_CAPTURE_MUXER:{ - GstElement *mux; - - g_object_get (camera->imagebin, "image-muxer", &mux, NULL); - g_value_take_object (value, mux); + case PROP_IMAGE_ENCODING_PROFILE: + if (camera->image_profile) { + gst_value_set_mini_object (value, + (GstMiniObject *) camera->image_profile); + } break; - } case PROP_IDLE: g_value_set_boolean (value, g_atomic_int_get (&camera->processing_counter) == 0); diff --git a/gst/camerabin2/gstcamerabin2.h b/gst/camerabin2/gstcamerabin2.h index 05961e2..40b7494 100644 --- a/gst/camerabin2/gstcamerabin2.h +++ b/gst/camerabin2/gstcamerabin2.h @@ -42,8 +42,8 @@ struct _GstCameraBin GstElement *user_src; gulong src_capture_notify_id; - 
GstElement *encodebin; - gulong encodebin_signal_id; + GstElement *video_encodebin; + gulong video_encodebin_signal_id; GstElement *videosink; gulong videosink_probe; GstElement *videobin_queue; @@ -53,7 +53,9 @@ struct _GstCameraBin GstElement *viewfinderbin_queue; GstElement *viewfinderbin_capsfilter; - GstElement *imagebin; + GstElement *image_encodebin; + gulong image_encodebin_signal_id; + GstElement *imagesink; GstElement *imagebin_queue; GstElement *imagebin_capsfilter; @@ -76,7 +78,11 @@ struct _GstCameraBin /* Index of the auto incrementing file index for video recordings */ gint video_index; - gboolean profile_switch; + gboolean video_profile_switch; + gboolean image_profile_switch; + + gboolean audio_drop_eos; + gboolean audio_send_newseg; /* properties */ gint mode; @@ -86,10 +92,13 @@ struct _GstCameraBin GstCaps *preview_caps; GstElement *preview_filter; GstEncodingProfile *video_profile; + GstEncodingProfile *image_profile; gfloat zoom; gfloat max_zoom; gboolean elements_created; + + GTimer *shot_to_save_timer; }; struct _GstCameraBinClass diff --git a/gst/camerabin2/gstimagecapturebin.c b/gst/camerabin2/gstimagecapturebin.c deleted file mode 100644 index b4aba2a..0000000 --- a/gst/camerabin2/gstimagecapturebin.c +++ /dev/null @@ -1,352 +0,0 @@ -/* GStreamer - * Copyright (C) 2010 Thiago Santos <thiago.sousa.santos@collabora.co.uk> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. 
- * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ -/** - * SECTION:element-gstimagecapturebin - * - * The gstimagecapturebin element does FIXME stuff. - * - * <refsect2> - * <title>Example launch line</title> - * |[ - * gst-launch -v videotestsrc num-buffers=3 ! imagecapturebin - * ]| - * FIXME Describe what the pipeline does. - * </refsect2> - */ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "gstimagecapturebin.h" -#include "camerabingeneral.h" - -/* prototypes */ - - -enum -{ - PROP_0, - PROP_LOCATION, - PROP_ENCODER, - PROP_MUXER -}; - -#define DEFAULT_LOCATION "img_%d" -#define DEFAULT_COLORSPACE "ffmpegcolorspace" -#define DEFAULT_ENCODER "jpegenc" -#define DEFAULT_MUXER "jifmux" -#define DEFAULT_SINK "multifilesink" - -/* pad templates */ - -static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE ("sink", - GST_PAD_SINK, - GST_PAD_ALWAYS, - GST_STATIC_CAPS ("video/x-raw-yuv; video/x-raw-rgb") - ); - -/* class initialization */ - -GST_BOILERPLATE (GstImageCaptureBin, gst_image_capture_bin, GstBin, - GST_TYPE_BIN); - -/* GObject callbacks */ -static void gst_image_capture_bin_dispose (GObject * object); -static void gst_image_capture_bin_finalize (GObject * object); - -/* Element class functions */ -static GstStateChangeReturn -gst_image_capture_bin_change_state (GstElement * element, GstStateChange trans); - -static void -gst_image_capture_bin_set_encoder (GstImageCaptureBin * imagebin, - GstElement * encoder) -{ - GST_DEBUG_OBJECT (GST_OBJECT (imagebin), - "Setting image encoder %" GST_PTR_FORMAT, encoder); - - if (imagebin->user_encoder) - g_object_unref (imagebin->user_encoder); - - if (encoder) - g_object_ref (encoder); - - imagebin->user_encoder = encoder; -} - -static void -gst_image_capture_bin_set_muxer (GstImageCaptureBin * imagebin, - 
GstElement * muxer) -{ - GST_DEBUG_OBJECT (GST_OBJECT (imagebin), - "Setting image muxer %" GST_PTR_FORMAT, muxer); - - if (imagebin->user_muxer) - g_object_unref (imagebin->user_muxer); - - if (muxer) - g_object_ref (muxer); - - imagebin->user_muxer = muxer; -} - -static void -gst_image_capture_bin_set_property (GObject * object, guint prop_id, - const GValue * value, GParamSpec * pspec) -{ - GstImageCaptureBin *imagebin = GST_IMAGE_CAPTURE_BIN_CAST (object); - - switch (prop_id) { - case PROP_LOCATION: - g_free (imagebin->location); - imagebin->location = g_value_dup_string (value); - GST_DEBUG_OBJECT (imagebin, "setting location to %s", imagebin->location); - if (imagebin->sink) { - g_object_set (imagebin->sink, "location", imagebin->location, NULL); - } - break; - case PROP_ENCODER: - gst_image_capture_bin_set_encoder (imagebin, g_value_get_object (value)); - break; - case PROP_MUXER: - gst_image_capture_bin_set_muxer (imagebin, g_value_get_object (value)); - break; - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); - break; - } -} - -static void -gst_image_capture_bin_get_property (GObject * object, guint prop_id, - GValue * value, GParamSpec * pspec) -{ - GstImageCaptureBin *imagebin = GST_IMAGE_CAPTURE_BIN_CAST (object); - - switch (prop_id) { - case PROP_LOCATION: - g_value_set_string (value, imagebin->location); - break; - case PROP_ENCODER: - g_value_set_object (value, imagebin->encoder); - break; - case PROP_MUXER: - g_value_set_object (value, imagebin->muxer); - break; - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); - break; - } -} - -static void -gst_image_capture_bin_finalize (GObject * object) -{ - GstImageCaptureBin *imgbin = GST_IMAGE_CAPTURE_BIN_CAST (object); - - g_free (imgbin->location); - - G_OBJECT_CLASS (parent_class)->finalize (object); -} - -static void -gst_image_capture_bin_base_init (gpointer g_class) -{ - GstElementClass *element_class = GST_ELEMENT_CLASS (g_class); - - 
gst_element_class_add_pad_template (element_class, - gst_static_pad_template_get (&sink_template)); - - gst_element_class_set_details_simple (element_class, "Image Capture Bin", - "Sink/Video", "Image Capture Bin used in camerabin2", - "Thiago Santos <thiago.sousa.santos@collabora.co.uk>"); -} - -static void -gst_image_capture_bin_class_init (GstImageCaptureBinClass * klass) -{ - GObjectClass *gobject_class; - GstElementClass *element_class; - - gobject_class = G_OBJECT_CLASS (klass); - element_class = GST_ELEMENT_CLASS (klass); - - gobject_class->dispose = gst_image_capture_bin_dispose; - gobject_class->finalize = gst_image_capture_bin_finalize; - gobject_class->set_property = gst_image_capture_bin_set_property; - gobject_class->get_property = gst_image_capture_bin_get_property; - - element_class->change_state = - GST_DEBUG_FUNCPTR (gst_image_capture_bin_change_state); - - g_object_class_install_property (gobject_class, PROP_LOCATION, - g_param_spec_string ("location", "Location", - "Location to save the captured files. 
A %%d can be used as a " - "placeholder for a capture count", - DEFAULT_LOCATION, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - - g_object_class_install_property (gobject_class, PROP_ENCODER, - g_param_spec_object ("image-encoder", "Image encoder", - "Image encoder GStreamer element (default is jpegenc)", - GST_TYPE_ELEMENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - - g_object_class_install_property (gobject_class, PROP_MUXER, - g_param_spec_object ("image-muxer", "Image muxer", - "Image muxer GStreamer element (default is jifmux)", - GST_TYPE_ELEMENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); -} - -static void -gst_image_capture_bin_init (GstImageCaptureBin * imagebin, - GstImageCaptureBinClass * imagebin_class) -{ - GstPadTemplate *tmpl; - - tmpl = gst_static_pad_template_get (&sink_template); - imagebin->ghostpad = gst_ghost_pad_new_no_target_from_template ("sink", tmpl); - gst_object_unref (tmpl); - gst_element_add_pad (GST_ELEMENT_CAST (imagebin), imagebin->ghostpad); - - imagebin->sink = NULL; - - imagebin->location = g_strdup (DEFAULT_LOCATION); - imagebin->encoder = NULL; - imagebin->user_encoder = NULL; - imagebin->muxer = NULL; - imagebin->user_muxer = NULL; -} - -static void -gst_image_capture_bin_dispose (GObject * object) -{ - GstImageCaptureBin *imagebin = GST_IMAGE_CAPTURE_BIN_CAST (object); - - if (imagebin->user_encoder) { - gst_object_unref (imagebin->user_encoder); - imagebin->user_encoder = NULL; - } - - if (imagebin->user_muxer) { - gst_object_unref (imagebin->user_muxer); - imagebin->user_muxer = NULL; - } - G_OBJECT_CLASS (parent_class)->dispose ((GObject *) imagebin); -} - -static gboolean -gst_image_capture_bin_create_elements (GstImageCaptureBin * imagebin) -{ - GstElement *colorspace; - GstPad *pad = NULL; - - if (imagebin->elements_created) - return TRUE; - - /* create elements */ - colorspace = - gst_camerabin_create_and_add_element (GST_BIN (imagebin), - DEFAULT_COLORSPACE, "imagebin-colorspace"); - if (!colorspace) - goto 
error; - - if (imagebin->user_encoder) { - imagebin->encoder = imagebin->user_encoder; - if (!gst_camerabin_add_element (GST_BIN (imagebin), imagebin->encoder)) { - goto error; - } - } else { - imagebin->encoder = - gst_camerabin_create_and_add_element (GST_BIN (imagebin), - DEFAULT_ENCODER, "imagebin-encoder"); - if (!imagebin->encoder) - goto error; - } - - if (imagebin->user_muxer) { - imagebin->muxer = imagebin->user_muxer; - if (!gst_camerabin_add_element (GST_BIN (imagebin), imagebin->muxer)) { - goto error; - } - } else { - imagebin->muxer = - gst_camerabin_create_and_add_element (GST_BIN (imagebin), - DEFAULT_MUXER, "imagebin-muxer"); - if (!imagebin->muxer) - goto error; - } - - imagebin->sink = - gst_camerabin_create_and_add_element (GST_BIN (imagebin), DEFAULT_SINK, - "imagebin-sink"); - if (!imagebin->sink) - goto error; - - g_object_set (imagebin->sink, "location", imagebin->location, "async", FALSE, - "post-messages", TRUE, NULL); - - /* add ghostpad */ - pad = gst_element_get_static_pad (colorspace, "sink"); - if (!gst_ghost_pad_set_target (GST_GHOST_PAD (imagebin->ghostpad), pad)) - goto error; - gst_object_unref (pad); - - imagebin->elements_created = TRUE; - return TRUE; - -error: - if (pad) - gst_object_unref (pad); - return FALSE; -} - -static GstStateChangeReturn -gst_image_capture_bin_change_state (GstElement * element, GstStateChange trans) -{ - GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; - GstImageCaptureBin *imagebin = GST_IMAGE_CAPTURE_BIN_CAST (element); - - switch (trans) { - case GST_STATE_CHANGE_NULL_TO_READY: - if (!gst_image_capture_bin_create_elements (imagebin)) { - return GST_STATE_CHANGE_FAILURE; - } - break; - default: - break; - } - - ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, trans); - - switch (trans) { - case GST_STATE_CHANGE_READY_TO_NULL: - break; - default: - break; - } - - return ret; -} - -gboolean -gst_image_capture_bin_plugin_init (GstPlugin * plugin) -{ - return gst_element_register 
(plugin, "imagecapturebin", GST_RANK_NONE, - gst_image_capture_bin_get_type ()); -} diff --git a/gst/camerabin2/gstimagecapturebin.h b/gst/camerabin2/gstimagecapturebin.h deleted file mode 100644 index 98a28e9..0000000 --- a/gst/camerabin2/gstimagecapturebin.h +++ /dev/null @@ -1,63 +0,0 @@ -/* GStreamer - * Copyright (C) 2010 Thiago Santos <thiago.sousa.santos@collabora.co.uk> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. 
- */ -#ifndef _GST_IMAGE_CAPTURE_BIN_H_ -#define _GST_IMAGE_CAPTURE_BIN_H_ - -#include <gst/gst.h> - -G_BEGIN_DECLS - -#define GST_TYPE_IMAGE_CAPTURE_BIN (gst_image_capture_bin_get_type()) -#define GST_IMAGE_CAPTURE_BIN(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_IMAGE_CAPTURE_BIN,GstImageCaptureBin)) -#define GST_IMAGE_CAPTURE_BIN_CAST(obj) ((GstImageCaptureBin *) obj) -#define GST_IMAGE_CAPTURE_BIN_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_IMAGE_CAPTURE_BIN,GstImageCaptureBinClass)) -#define GST_IS_IMAGE_CAPTURE_BIN(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_IMAGE_CAPTURE_BIN)) -#define GST_IS_IMAGE_CAPTURE_BIN_CLASS(obj) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_IMAGE_CAPTURE_BIN)) - -typedef struct _GstImageCaptureBin GstImageCaptureBin; -typedef struct _GstImageCaptureBinClass GstImageCaptureBinClass; - -struct _GstImageCaptureBin -{ - GstBin bin; - - GstPad *ghostpad; - GstElement *sink; - - /* props */ - gchar *location; - GstElement *encoder; - GstElement *user_encoder; - GstElement *muxer; - GstElement *user_muxer; - - gboolean elements_created; -}; - -struct _GstImageCaptureBinClass -{ - GstBinClass bin_class; -}; - -GType gst_image_capture_bin_get_type (void); -gboolean gst_image_capture_bin_plugin_init (GstPlugin * plugin); - -G_END_DECLS - -#endif diff --git a/gst/camerabin2/gstomxcamerabinsrc.c b/gst/camerabin2/gstomxcamerabinsrc.c new file mode 100644 index 0000000..de60db9 --- /dev/null +++ b/gst/camerabin2/gstomxcamerabinsrc.c @@ -0,0 +1,647 @@ +/* + * GStreamer + * Copyright (C) 2011 Texas Instruments, Inc + * Copyright (C) 2011 Collabora Ltd + * Author: Alessandro Decina <alessandro.decina@collabora.co.uk> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + + +#ifdef HAVE_CONFIG_H +# include <config.h> +#endif + +#include <string.h> + +#include "gstomxcamerabinsrc.h" +#include "camerabingeneral.h" + +/* FIXME: get rid of this */ +enum +{ + OMX_CAMERA_MODE_PREVIEW = 0, + OMX_CAMERA_MODE_VIDEO = 1, + OMX_CAMERA_MODE_VIDEO_IMAGE = 2, + OMX_CAMERA_MODE_IMAGE = 3, + OMX_CAMERA_MODE_IMAGE_HS = 4, +}; + +enum GstVideoRecordingStatus +{ + GST_VIDEO_RECORDING_STATUS_DONE, + GST_VIDEO_RECORDING_STATUS_STARTING, + GST_VIDEO_RECORDING_STATUS_RUNNING, + GST_VIDEO_RECORDING_STATUS_FINISHING +}; + + +enum +{ + PROP_0, + PROP_SOURCE_FILTER, + PROP_ALLOCATE_BUFFERS, + PROP_CAMERA_DEVICE +}; + +typedef enum +{ + OmxCameraPrimary, + OmxCameraSecondary, + OmxCameraStereo +} GstOmxCamerabinDevice; + +GST_DEBUG_CATEGORY (omx_camera_bin_src_debug); +#define GST_CAT_DEFAULT omx_camera_bin_src_debug + +GST_BOILERPLATE (GstOmxCameraBinSrc, gst_omx_camera_bin_src, + GstBaseCameraBinSrc, GST_TYPE_BASE_CAMERA_SRC); + +GType +gst_omx_camerabin_device_get_type (void) +{ + static GType type = 0; + + if (!type) { + static GEnumValue vals[] = { + {OmxCameraPrimary, "Primary", "primary"}, + {OmxCameraSecondary, "Secondary", "secondary"}, + {OmxCameraStereo, "Stereo", "stereo"}, + {0, NULL, NULL}, + }; + + type = g_enum_register_static ("GstOmxCamerabinDevice", vals); + } + + return type; +} + +static void +gst_omx_camera_bin_src_dispose (GObject * object) +{ + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (object); + + gst_caps_replace 
(&self->image_capture_caps, NULL); + gst_object_replace ((GstObject **) & self->user_source_filter, NULL); + + G_OBJECT_CLASS (parent_class)->dispose (object); +} + +static void +gst_omx_camera_bin_src_finalize (GstOmxCameraBinSrc * self) +{ + G_OBJECT_CLASS (parent_class)->finalize ((GObject *) (self)); +} + +static void +gst_omx_camera_bin_src_set_property (GObject * object, + guint prop_id, const GValue * value, GParamSpec * pspec) +{ + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (object); + + switch (prop_id) { + case PROP_SOURCE_FILTER: + if (self->source_filter) { + gst_bin_remove (GST_BIN (object), self->source_filter); + gst_element_set_state (self->source_filter, GST_STATE_NULL); + gst_object_unref (self->source_filter); + self->source_filter = NULL; + } + + if (self->user_source_filter) + gst_object_unref (self->user_source_filter); + self->user_source_filter = g_value_dup_object (value); + + if (self->user_source_filter) + self->source_filter = self->user_source_filter; + else + self->source_filter = gst_element_factory_make ("identity", NULL); + self->source_filter = gst_object_ref (self->source_filter); + + gst_bin_add (GST_BIN (self), self->source_filter); + if (!gst_element_link_pads (self->video_source, "src", + self->source_filter, "sink")) + g_assert (FALSE); + if (!gst_element_link_pads (self->source_filter, "src", + self->tee, "sink")) + g_assert (FALSE); + break; + case PROP_ALLOCATE_BUFFERS: + { + gboolean allocate = g_value_get_boolean (value); + if (self->video_source) + g_object_set (self->video_source, "allocate-buffers", allocate, NULL); + break; + } + case PROP_CAMERA_DEVICE: + { + gint device = g_value_get_enum (value); + if (self->video_source) + g_object_set (self->video_source, "device", device, NULL); + break; + } + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (self, prop_id, pspec); + break; + } +} + +static void +gst_omx_camera_bin_src_get_property (GObject * object, + guint prop_id, GValue * value, GParamSpec * pspec) +{ + 
GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (object); + + switch (prop_id) { + case PROP_SOURCE_FILTER: + if (self->user_source_filter) + g_value_set_object (value, self->user_source_filter); + break; + case PROP_ALLOCATE_BUFFERS: + { + gboolean allocate = TRUE; + + if (self->video_source) + g_object_get (self->video_source, "allocate-buffers", &allocate, NULL); + + g_value_set_boolean (value, allocate); + break; + } + case PROP_CAMERA_DEVICE: + { + gint device = 0; + + if (self->video_source) + g_object_get (self->video_source, "device", &device, NULL); + + g_value_set_enum (value, device); + break; + } + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (self, prop_id, pspec); + break; + } +} + + +/** + * gst_omx_camera_bin_src_imgsrc_probe: + * + * Buffer probe called before sending each buffer to image queue. + */ +static gboolean +gst_omx_camera_bin_src_imgsrc_probe (GstPad * pad, GstBuffer * buffer, + gpointer data) +{ + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (data); + GstBaseCameraBinSrc *camerasrc = GST_BASE_CAMERA_SRC (data); + gboolean ret = FALSE; + + GST_LOG_OBJECT (self, "Image probe, mode %d, capture count %d", + camerasrc->mode, self->image_capture_count); + + g_mutex_lock (camerasrc->capturing_mutex); + if (self->image_capture_count > 0) { + ret = TRUE; + self->image_capture_count--; + + /* post preview */ + /* TODO This can likely be optimized if the viewfinder caps is the same as + * the preview caps, avoiding another scaling of the same buffer. */ + GST_DEBUG_OBJECT (self, "Posting preview for image"); + gst_base_camera_src_post_preview (camerasrc, buffer); + + if (self->image_capture_count == 0) { + gst_base_camera_src_finish_capture (camerasrc); + } + } + g_mutex_unlock (camerasrc->capturing_mutex); + return ret; +} + +/** + * gst_omx_camera_bin_src_vidsrc_probe: + * + * Buffer probe called before sending each buffer to image queue. 
+ */ +static gboolean +gst_omx_camera_bin_src_vidsrc_probe (GstPad * pad, GstBuffer * buffer, + gpointer data) +{ + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (data); + GstBaseCameraBinSrc *camerasrc = GST_BASE_CAMERA_SRC_CAST (self); + gboolean ret = FALSE; + + GST_LOG_OBJECT (self, "Video probe, mode %d, capture status %d", + camerasrc->mode, self->video_rec_status); + + /* TODO do we want to lock for every buffer? */ + /* + * Note that we can use gst_pad_push_event here because we are a buffer + * probe. + */ + /* TODO shouldn't access this directly */ + g_mutex_lock (camerasrc->capturing_mutex); + if (self->video_rec_status == GST_VIDEO_RECORDING_STATUS_DONE) { + /* NOP */ + } else if (self->video_rec_status == GST_VIDEO_RECORDING_STATUS_STARTING) { + GstClockTime timestamp; + + GST_DEBUG_OBJECT (self, "Starting video recording"); + self->video_rec_status = GST_VIDEO_RECORDING_STATUS_RUNNING; + + timestamp = GST_BUFFER_TIMESTAMP (buffer); + if (!GST_CLOCK_TIME_IS_VALID (timestamp)) + timestamp = 0; + gst_pad_push_event (pad, gst_event_new_new_segment (FALSE, 1.0, + GST_FORMAT_TIME, timestamp, -1, 0)); + + /* post preview */ + GST_DEBUG_OBJECT (self, "Posting preview for video"); + gst_base_camera_src_post_preview (camerasrc, buffer); + + ret = TRUE; + } else if (self->video_rec_status == GST_VIDEO_RECORDING_STATUS_FINISHING) { + /* send eos */ + GST_DEBUG_OBJECT (self, "Finishing video recording, pushing eos"); + gst_pad_push_event (pad, gst_event_new_eos ()); + self->video_rec_status = GST_VIDEO_RECORDING_STATUS_DONE; + gst_base_camera_src_finish_capture (camerasrc); + } else { + ret = TRUE; + } + g_mutex_unlock (camerasrc->capturing_mutex); + return ret; +} + + +static gboolean +gst_omx_camera_bin_src_construct_pipeline (GstBaseCameraBinSrc * bcamsrc) +{ + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (bcamsrc); + GstBin *cbin = GST_BIN (bcamsrc); + gboolean ret = FALSE; + GstPad *pad; + GstCaps *caps; + + if (!self->elements_created) { + 
GST_DEBUG_OBJECT (self, "constructing pipeline"); + + self->video_source = gst_element_factory_make ("omx_camera", NULL); + if (self->video_source == NULL) + goto done; + + if (bcamsrc->mode == MODE_IMAGE) + g_object_set (self->video_source, "mode", + OMX_CAMERA_MODE_VIDEO_IMAGE, NULL); + else + g_object_set (self->video_source, "mode", OMX_CAMERA_MODE_VIDEO, NULL); + + if (self->source_filter == NULL) + self->source_filter = gst_element_factory_make ("identity", NULL); + self->source_filter = gst_object_ref (self->source_filter); + + self->tee = gst_element_factory_make ("tee", NULL); + + self->vfsrc_filter = gst_element_factory_make ("capsfilter", + "vfsrc-capsfilter"); + caps = gst_caps_from_string ("video/x-raw-yuv"); + g_object_set (self->vfsrc_filter, "caps", caps, NULL); + gst_caps_unref (caps); + + self->vidsrc_filter = gst_element_factory_make ("capsfilter", + "vidsrc-capsfilter"); + caps = gst_caps_from_string ("video/x-raw-yuv"); + g_object_set (self->vidsrc_filter, "caps", caps, NULL); + gst_caps_unref (caps); + + self->imgsrc_stride = + gst_element_factory_make ("identity", "imgsrc-stride"); + + gst_bin_add_many (cbin, self->video_source, self->tee, + self->vfsrc_filter, self->vidsrc_filter, self->imgsrc_stride, NULL); + + gst_bin_add (cbin, self->source_filter); + if (!gst_element_link_pads (self->video_source, "src", + self->source_filter, "sink")) + goto link_error; + if (!gst_element_link_pads (self->source_filter, "src", self->tee, "sink")) + goto link_error; + + pad = gst_element_get_request_pad (self->tee, "src0"); + g_object_set (self->tee, "alloc-pad", pad, NULL); + gst_object_unref (pad); + pad = gst_element_get_request_pad (self->tee, "src1"); + gst_pad_add_buffer_probe (pad, + G_CALLBACK (gst_omx_camera_bin_src_vidsrc_probe), self); + gst_object_unref (pad); + + if (!gst_element_link_pads (self->tee, "src0", self->vfsrc_filter, "sink")) + goto link_error; + + if (!gst_element_link_pads (self->tee, "src1", self->vidsrc_filter, "sink")) + 
goto link_error; + + if (!gst_element_link_pads (self->video_source, "imgsrc", + self->imgsrc_stride, "sink")) + goto link_error; + + pad = gst_element_get_static_pad (self->video_source, "imgsrc"); + gst_pad_add_buffer_probe (pad, + G_CALLBACK (gst_omx_camera_bin_src_imgsrc_probe), self); + gst_object_unref (pad); + + pad = gst_element_get_static_pad (self->vfsrc_filter, "src"); + gst_ghost_pad_set_target (GST_GHOST_PAD (self->vfsrc), pad); + gst_object_unref (pad); + + pad = gst_element_get_static_pad (self->vidsrc_filter, "src"); + gst_ghost_pad_set_target (GST_GHOST_PAD (self->vidsrc), pad); + gst_object_unref (pad); + + pad = gst_element_get_static_pad (self->imgsrc_stride, "src"); + gst_ghost_pad_set_target (GST_GHOST_PAD (self->imgsrc), pad); + gst_object_unref (pad); + } + + ret = TRUE; + self->elements_created = TRUE; + +done: + return ret; + +link_error: + GST_ERROR_OBJECT (self, "failed to link elements"); + return FALSE; +} + +static gboolean +gst_omx_camera_bin_src_set_mode (GstBaseCameraBinSrc * bcamsrc, + GstCameraBinMode mode) +{ + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (bcamsrc); + + GST_INFO_OBJECT (self, "mode %d", (gint) mode); + + if (self->video_source) { + if (mode == MODE_IMAGE) { + g_object_set (self->video_source, "mode", OMX_CAMERA_MODE_VIDEO_IMAGE, + NULL); + } else { + g_object_set (self->video_source, "mode", OMX_CAMERA_MODE_VIDEO, NULL); + } + } + self->mode = mode; + + return TRUE; +} + +static void +gst_omx_camera_bin_src_set_zoom (GstBaseCameraBinSrc * bcamsrc, gfloat zoom) +{ + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (bcamsrc); + gint omx_zoom; + + GST_INFO_OBJECT (self, "setting zoom %f", zoom); + + omx_zoom = zoom * 80; + if (omx_zoom < 100) + omx_zoom = 100; + + g_object_set (G_OBJECT (self->video_source), "zoom", omx_zoom, NULL); +} + +static GstCaps * +gst_omx_camera_bin_src_get_allowed_input_caps (GstBaseCameraBinSrc * bcamsrc) +{ + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (bcamsrc); + GstCaps 
*caps = NULL; + GstPad *pad = NULL, *peer_pad = NULL; + GstState state; + GstElement *videosrc; + + videosrc = self->video_source; + + if (!videosrc) { + GST_WARNING_OBJECT (self, "no videosrc, can't get allowed caps"); + goto failed; + } + + if (self->allowed_caps) { + GST_DEBUG_OBJECT (self, "returning cached caps"); + goto done; + } + + pad = gst_element_get_static_pad (videosrc, "src"); + + if (!pad) { + GST_WARNING_OBJECT (self, "no srcpad in videosrc"); + goto failed; + } + + state = GST_STATE (videosrc); + + /* Make this function work also in NULL state */ + if (state == GST_STATE_NULL) { + GST_DEBUG_OBJECT (self, "setting videosrc to ready temporarily"); + peer_pad = gst_pad_get_peer (pad); + if (peer_pad) { + gst_pad_unlink (pad, peer_pad); + } + /* Set videosrc to READY to open video device */ + gst_element_set_locked_state (videosrc, TRUE); + gst_element_set_state (videosrc, GST_STATE_READY); + } + + self->allowed_caps = gst_pad_get_caps (pad); + + /* Restore state and re-link if necessary */ + if (state == GST_STATE_NULL) { + GST_DEBUG_OBJECT (self, "restoring videosrc state %d", state); + /* Reset videosrc to NULL state, some drivers seem to need this */ + gst_element_set_state (videosrc, GST_STATE_NULL); + if (peer_pad) { + gst_pad_link (pad, peer_pad); + gst_object_unref (peer_pad); + } + gst_element_set_locked_state (videosrc, FALSE); + } + + gst_object_unref (pad); + +done: + if (self->allowed_caps) { + caps = gst_caps_copy (self->allowed_caps); + } + GST_DEBUG_OBJECT (self, "allowed caps:%" GST_PTR_FORMAT, caps); +failed: + return caps; +} + +static gboolean +gst_omx_camera_bin_src_start_capture (GstBaseCameraBinSrc * camerasrc) +{ + GstOmxCameraBinSrc *src = GST_OMX_CAMERA_BIN_SRC (camerasrc); + + if (src->mode == MODE_IMAGE) { + src->image_capture_count = 1; + } else if (src->mode == MODE_VIDEO) { + if (src->video_rec_status == GST_VIDEO_RECORDING_STATUS_DONE) { + src->video_rec_status = GST_VIDEO_RECORDING_STATUS_STARTING; + } + } else { + 
g_assert_not_reached (); + return FALSE; + } + return TRUE; +} + +static void +gst_omx_camera_bin_src_stop_capture (GstBaseCameraBinSrc * camerasrc) +{ + GstOmxCameraBinSrc *src = GST_OMX_CAMERA_BIN_SRC (camerasrc); + + /* TODO shoud we access this directly? Maybe a macro is better? */ + if (src->mode == MODE_VIDEO) { + if (src->video_rec_status == GST_VIDEO_RECORDING_STATUS_STARTING) { + GST_DEBUG_OBJECT (src, "Aborting, had not started recording"); + src->video_rec_status = GST_VIDEO_RECORDING_STATUS_DONE; + + } else if (src->video_rec_status == GST_VIDEO_RECORDING_STATUS_RUNNING) { + GST_DEBUG_OBJECT (src, "Marking video recording as finishing"); + src->video_rec_status = GST_VIDEO_RECORDING_STATUS_FINISHING; + } + } else { + src->image_capture_count = 0; + } +} + +static GstStateChangeReturn +gst_omx_camera_bin_src_change_state (GstElement * element, GstStateChange trans) +{ + GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; + GstOmxCameraBinSrc *self = GST_OMX_CAMERA_BIN_SRC (element); + + ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, trans); + + if (ret == GST_STATE_CHANGE_FAILURE) + goto end; + + switch (trans) { + case GST_STATE_CHANGE_PAUSED_TO_READY: + self->drop_newseg = FALSE; + break; + case GST_STATE_CHANGE_READY_TO_NULL: + break; + case GST_STATE_CHANGE_NULL_TO_READY: + break; + default: + break; + } + +end: + return ret; +} + +static void +gst_omx_camera_bin_src_base_init (gpointer g_class) +{ + GstElementClass *gstelement_class = GST_ELEMENT_CLASS (g_class); + + GST_DEBUG_CATEGORY_INIT (omx_camera_bin_src_debug, "omxcamerabinsrc", + 0, "omx_camera camerabin2 adapter"); + + gst_element_class_set_details_simple (gstelement_class, + "omx_camera camerabin2 adapter", "Source/Video", + "omx_camera camerabin2 adapter", + "Alessandro Decina <alessandro.decina@collabora.co.uk>"); +} + +static void +gst_omx_camera_bin_src_class_init (GstOmxCameraBinSrcClass * klass) +{ + GObjectClass *gobject_class; + GstElementClass *gstelement_class; + 
GstBaseCameraBinSrcClass *gstbasecamerasrc_class; + + gobject_class = G_OBJECT_CLASS (klass); + gstelement_class = GST_ELEMENT_CLASS (klass); + gstbasecamerasrc_class = GST_BASE_CAMERA_SRC_CLASS (klass); + + gobject_class->dispose = gst_omx_camera_bin_src_dispose; + gobject_class->finalize = + (GObjectFinalizeFunc) gst_omx_camera_bin_src_finalize; + gobject_class->set_property = gst_omx_camera_bin_src_set_property; + gobject_class->get_property = gst_omx_camera_bin_src_get_property; + + gstelement_class->change_state = gst_omx_camera_bin_src_change_state; + + gstbasecamerasrc_class->construct_pipeline = + gst_omx_camera_bin_src_construct_pipeline; + gstbasecamerasrc_class->set_zoom = gst_omx_camera_bin_src_set_zoom; + gstbasecamerasrc_class->set_mode = gst_omx_camera_bin_src_set_mode; + gstbasecamerasrc_class->get_allowed_input_caps = + gst_omx_camera_bin_src_get_allowed_input_caps; + gstbasecamerasrc_class->start_capture = gst_omx_camera_bin_src_start_capture; + gstbasecamerasrc_class->stop_capture = gst_omx_camera_bin_src_stop_capture; + + g_object_class_install_property (gobject_class, PROP_SOURCE_FILTER, + g_param_spec_object ("source-filter", "Source filter", + "Writeme", + GST_TYPE_ELEMENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + g_object_class_install_property (gobject_class, PROP_ALLOCATE_BUFFERS, + g_param_spec_boolean ("allocate-buffers", "Allocate buffers", + "Use OMX_AllocateBuffer to allocate output buffers", FALSE, + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + g_object_class_install_property (gobject_class, PROP_CAMERA_DEVICE, + g_param_spec_enum ("camera-device", "Camera device", + "Select OMX Camera device", GST_TYPE_OMX_CAMERABIN_DEVICE, 0, + G_PARAM_READWRITE)); +} + +static void +gst_omx_camera_bin_src_init (GstOmxCameraBinSrc * self, + GstOmxCameraBinSrcClass * klass) +{ + self->vfsrc = + gst_ghost_pad_new_no_target (GST_BASE_CAMERA_SRC_VIEWFINDER_PAD_NAME, + GST_PAD_SRC); + gst_element_add_pad (GST_ELEMENT (self), self->vfsrc); 
+ + self->imgsrc = + gst_ghost_pad_new_no_target (GST_BASE_CAMERA_SRC_IMAGE_PAD_NAME, + GST_PAD_SRC); + gst_element_add_pad (GST_ELEMENT (self), self->imgsrc); + + self->vidsrc = + gst_ghost_pad_new_no_target (GST_BASE_CAMERA_SRC_VIDEO_PAD_NAME, + GST_PAD_SRC); + gst_element_add_pad (GST_ELEMENT (self), self->vidsrc); + + self->srcpad_event_func = GST_PAD_EVENTFUNC (self->vfsrc); + + self->video_rec_status = GST_VIDEO_RECORDING_STATUS_DONE; + self->mode = GST_BASE_CAMERA_SRC_CAST (self)->mode; +} + +gboolean +gst_omx_camera_bin_src_plugin_init (GstPlugin * plugin) +{ + return gst_element_register (plugin, "omxcamerabinsrc", GST_RANK_PRIMARY + 1, + gst_omx_camera_bin_src_get_type ()); +} diff --git a/gst/camerabin2/gstomxcamerabinsrc.h b/gst/camerabin2/gstomxcamerabinsrc.h new file mode 100644 index 0000000..d3a89fa --- /dev/null +++ b/gst/camerabin2/gstomxcamerabinsrc.h @@ -0,0 +1,109 @@ +/* + * GStreamer + * Copyright (C) 2011 Texas Instruments, Inc + * Author: Alessandro Decina <alessandro.decina@collabora.co.uk> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. 
+ */ + + +#ifndef __GST_OMX_CAMERA_BIN_SRC_H__ +#define __GST_OMX_CAMERA_BIN_SRC_H__ + +#include <gst/gst.h> +#include <gst/basecamerabinsrc/gstbasecamerasrc.h> +#include <gst/basecamerabinsrc/gstcamerabinpreview.h> +#include "camerabingeneral.h" + +G_BEGIN_DECLS +#define GST_TYPE_OMX_CAMERA_BIN_SRC \ + (gst_omx_camera_bin_src_get_type()) +#define GST_OMX_CAMERA_BIN_SRC(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_OMX_CAMERA_BIN_SRC,GstOmxCameraBinSrc)) +#define GST_OMX_CAMERA_BIN_SRC_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_OMX_CAMERA_BIN_SRC,GstOmxCameraBinSrcClass)) +#define GST_IS_OMX_CAMERA_BIN_SRC(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_OMX_CAMERA_BIN_SRC)) +#define GST_IS_OMX_CAMERA_BIN_SRC_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_OMX_CAMERA_BIN_SRC)) + GType gst_omx_camera_bin_src_get_type (void); + +typedef struct _GstOmxCameraBinSrc GstOmxCameraBinSrc; +typedef struct _GstOmxCameraBinSrcClass GstOmxCameraBinSrcClass; + +#define GST_TYPE_OMX_CAMERABIN_DEVICE (gst_omx_camerabin_device_get_type ()) +GType gst_omx_camerabin_device_get_type (void); + + +/** + * GstOmxCameraBinSrc: + * + */ +struct _GstOmxCameraBinSrc +{ + GstBaseCameraBinSrc parent; + + GstCameraBinMode mode; + + GstPad *vfsrc; + GstPad *imgsrc; + GstPad *vidsrc; + + /* video recording controls */ + gint video_rec_status; + + /* image capture controls */ + gint image_capture_count; + + GstElement *user_source_filter; + + /* source elements */ + GstElement *video_source; + GstElement *source_filter; + GstElement *tee; + GstElement *vfsrc_filter; + GstElement *vfsrc_stride; + GstElement *vidsrc_filter; + GstElement *vidsrc_stride; + GstElement *imgsrc_stride; + + gboolean elements_created; + + GstPadEventFunction srcpad_event_func; + + /* For changing caps without losing timestamps */ + gboolean drop_newseg; + + /* Caps that videosrc supports */ + GstCaps *allowed_caps; + + /* Caps applied to capsfilters when taking still image */ + GstCaps 
*image_capture_caps; +}; + + +/** + * GstOmxCameraBinSrcClass: + * + */ +struct _GstOmxCameraBinSrcClass +{ + GstBaseCameraBinSrcClass parent; +}; + +gboolean gst_omx_camera_bin_src_plugin_init (GstPlugin * plugin); + +#endif /* __GST_OMX_CAMERA_BIN_SRC_H__ */ diff --git a/gst/camerabin2/gstplugin.c b/gst/camerabin2/gstplugin.c index 56fa78f..f4187ba 100644 --- a/gst/camerabin2/gstplugin.c +++ b/gst/camerabin2/gstplugin.c @@ -24,8 +24,8 @@ #endif #include "gstviewfinderbin.h" -#include "gstimagecapturebin.h" #include "gstwrappercamerabinsrc.h" +#include "gstomxcamerabinsrc.h" #include "gstcamerabin2.h" static gboolean @@ -33,10 +33,10 @@ plugin_init (GstPlugin * plugin) { if (!gst_viewfinder_bin_plugin_init (plugin)) return FALSE; - if (!gst_image_capture_bin_plugin_init (plugin)) - return FALSE; if (!gst_wrapper_camera_bin_src_plugin_init (plugin)) return FALSE; + if (!gst_omx_camera_bin_src_plugin_init (plugin)) + return FALSE; if (!gst_camera_bin_plugin_init (plugin)) return FALSE; diff --git a/gst/camerabin2/gstviewfinderbin.c b/gst/camerabin2/gstviewfinderbin.c index e03631e..be7fb4b 100644 --- a/gst/camerabin2/gstviewfinderbin.c +++ b/gst/camerabin2/gstviewfinderbin.c @@ -53,7 +53,7 @@ enum static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, - GST_STATIC_CAPS ("video/x-raw-yuv; video/x-raw-rgb") + GST_STATIC_CAPS ("video/x-raw-yuv") ); /* class initialization */ @@ -150,11 +150,11 @@ gst_viewfinder_bin_create_elements (GstViewfinderBin * vfbin) if (!vfbin->elements_created) { /* create elements */ - csp = gst_element_factory_make ("ffmpegcolorspace", "vfbin-csp"); + csp = gst_element_factory_make ("identity", "vfbin-csp"); if (!csp) goto error; - videoscale = gst_element_factory_make ("videoscale", "vfbin-videoscale"); + videoscale = gst_element_factory_make ("identity", "vfbin-videoscale"); if (!videoscale) goto error; diff --git a/gst/camerabin2/gstwrappercamerabinsrc.c 
b/gst/camerabin2/gstwrappercamerabinsrc.c index 2cf6ac4..bad473d 100644 --- a/gst/camerabin2/gstwrappercamerabinsrc.c +++ b/gst/camerabin2/gstwrappercamerabinsrc.c @@ -44,7 +44,7 @@ GST_DEBUG_CATEGORY (wrapper_camera_bin_src_debug); #define GST_CAT_DEFAULT wrapper_camera_bin_src_debug GST_BOILERPLATE (GstWrapperCameraBinSrc, gst_wrapper_camera_bin_src, - GstBaseCameraSrc, GST_TYPE_BASE_CAMERA_SRC); + GstBaseCameraBinSrc, GST_TYPE_BASE_CAMERA_SRC); static void set_capsfilter_caps (GstWrapperCameraBinSrc * self, GstCaps * new_caps); @@ -178,7 +178,7 @@ gst_wrapper_camera_bin_src_imgsrc_probe (GstPad * pad, GstBuffer * buffer, gpointer data) { GstWrapperCameraBinSrc *self = GST_WRAPPER_CAMERA_BIN_SRC (data); - GstBaseCameraSrc *camerasrc = GST_BASE_CAMERA_SRC (data); + GstBaseCameraBinSrc *camerasrc = GST_BASE_CAMERA_SRC (data); gboolean ret = FALSE; GST_LOG_OBJECT (self, "Image probe, mode %d, capture count %d", @@ -213,7 +213,7 @@ gst_wrapper_camera_bin_src_vidsrc_probe (GstPad * pad, GstBuffer * buffer, gpointer data) { GstWrapperCameraBinSrc *self = GST_WRAPPER_CAMERA_BIN_SRC (data); - GstBaseCameraSrc *camerasrc = GST_BASE_CAMERA_SRC_CAST (self); + GstBaseCameraBinSrc *camerasrc = GST_BASE_CAMERA_SRC_CAST (self); gboolean ret = FALSE; GST_LOG_OBJECT (self, "Video probe, mode %d, capture status %d", @@ -300,7 +300,7 @@ static void gst_wrapper_camera_bin_src_caps_cb (GObject * gobject, GParamSpec * pspec, gpointer user_data) { - GstBaseCameraSrc *bcamsrc = GST_BASE_CAMERA_SRC (user_data); + GstBaseCameraBinSrc *bcamsrc = GST_BASE_CAMERA_SRC (user_data); GstWrapperCameraBinSrc *self = GST_WRAPPER_CAMERA_BIN_SRC (user_data); GstPad *src_caps_src_pad; GstCaps *caps = NULL; @@ -340,7 +340,7 @@ static void gst_wrapper_camera_bin_src_max_zoom_cb (GObject * self, GParamSpec * pspec, gpointer user_data) { - GstBaseCameraSrc *bcamsrc = (GstBaseCameraSrc *) user_data; + GstBaseCameraBinSrc *bcamsrc = (GstBaseCameraBinSrc *) user_data; g_object_get (self, "max-zoom", 
&bcamsrc->max_zoom, NULL); g_object_notify (G_OBJECT (bcamsrc), "max-zoom"); @@ -361,7 +361,7 @@ gst_wrapper_camera_bin_src_max_zoom_cb (GObject * self, GParamSpec * pspec, * Returns: TRUE, if elements were successfully created, FALSE otherwise */ static gboolean -gst_wrapper_camera_bin_src_construct_pipeline (GstBaseCameraSrc * bcamsrc) +gst_wrapper_camera_bin_src_construct_pipeline (GstBaseCameraBinSrc * bcamsrc) { GstWrapperCameraBinSrc *self = GST_WRAPPER_CAMERA_BIN_SRC (bcamsrc); GstBin *cbin = GST_BIN (bcamsrc); @@ -539,7 +539,7 @@ copy_missing_fields (GQuark field_id, const GValue * value, gpointer user_data) static void adapt_image_capture (GstWrapperCameraBinSrc * self, GstCaps * in_caps) { - GstBaseCameraSrc *bcamsrc = GST_BASE_CAMERA_SRC (self); + GstBaseCameraBinSrc *bcamsrc = GST_BASE_CAMERA_SRC (self); GstStructure *in_st, *new_st, *req_st; gint in_width = 0, in_height = 0, req_width = 0, req_height = 0, crop = 0; gdouble ratio_w, ratio_h; @@ -647,7 +647,7 @@ img_capture_prepared (gpointer data, GstCaps * caps) static gboolean start_image_capture (GstWrapperCameraBinSrc * self) { - GstBaseCameraSrc *bcamsrc = GST_BASE_CAMERA_SRC (self); + GstBaseCameraBinSrc *bcamsrc = GST_BASE_CAMERA_SRC (self); GstPhotography *photography = gst_base_camera_src_get_photography (bcamsrc); gboolean ret = FALSE; GstCaps *caps; @@ -686,7 +686,7 @@ start_image_capture (GstWrapperCameraBinSrc * self) } static gboolean -gst_wrapper_camera_bin_src_set_mode (GstBaseCameraSrc * bcamsrc, +gst_wrapper_camera_bin_src_set_mode (GstBaseCameraBinSrc * bcamsrc, GstCameraBinMode mode) { GstPhotography *photography = gst_base_camera_src_get_photography (bcamsrc); @@ -734,7 +734,7 @@ static gboolean set_element_zoom (GstWrapperCameraBinSrc * self, gfloat zoom) { gboolean ret = FALSE; - GstBaseCameraSrc *bcamsrc = GST_BASE_CAMERA_SRC (self); + GstBaseCameraBinSrc *bcamsrc = GST_BASE_CAMERA_SRC (self); gint w2_crop = 0, h2_crop = 0; GstPad *pad_zoom_sink = NULL; gint left = 
self->base_crop_left; @@ -779,7 +779,7 @@ set_element_zoom (GstWrapperCameraBinSrc * self, gfloat zoom) } static void -gst_wrapper_camera_bin_src_set_zoom (GstBaseCameraSrc * bcamsrc, gfloat zoom) +gst_wrapper_camera_bin_src_set_zoom (GstBaseCameraBinSrc * bcamsrc, gfloat zoom) { GstWrapperCameraBinSrc *self = GST_WRAPPER_CAMERA_BIN_SRC (bcamsrc); @@ -796,7 +796,8 @@ gst_wrapper_camera_bin_src_set_zoom (GstBaseCameraSrc * bcamsrc, gfloat zoom) } static GstCaps * -gst_wrapper_camera_bin_src_get_allowed_input_caps (GstBaseCameraSrc * bcamsrc) +gst_wrapper_camera_bin_src_get_allowed_input_caps (GstBaseCameraBinSrc * + bcamsrc) { GstWrapperCameraBinSrc *self = GST_WRAPPER_CAMERA_BIN_SRC (bcamsrc); GstCaps *caps = NULL; @@ -976,7 +977,7 @@ set_capsfilter_caps (GstWrapperCameraBinSrc * self, GstCaps * new_caps) } static gboolean -gst_wrapper_camera_bin_src_start_capture (GstBaseCameraSrc * camerasrc) +gst_wrapper_camera_bin_src_start_capture (GstBaseCameraBinSrc * camerasrc) { GstWrapperCameraBinSrc *src = GST_WRAPPER_CAMERA_BIN_SRC (camerasrc); @@ -1018,7 +1019,7 @@ gst_wrapper_camera_bin_src_start_capture (GstBaseCameraSrc * camerasrc) } static void -gst_wrapper_camera_bin_src_stop_capture (GstBaseCameraSrc * camerasrc) +gst_wrapper_camera_bin_src_stop_capture (GstBaseCameraBinSrc * camerasrc) { GstWrapperCameraBinSrc *src = GST_WRAPPER_CAMERA_BIN_SRC (camerasrc); @@ -1085,7 +1086,7 @@ gst_wrapper_camera_bin_src_class_init (GstWrapperCameraBinSrcClass * klass) { GObjectClass *gobject_class; GstElementClass *gstelement_class; - GstBaseCameraSrcClass *gstbasecamerasrc_class; + GstBaseCameraBinSrcClass *gstbasecamerasrc_class; gobject_class = G_OBJECT_CLASS (klass); gstelement_class = GST_ELEMENT_CLASS (klass); diff --git a/gst/camerabin2/gstwrappercamerabinsrc.h b/gst/camerabin2/gstwrappercamerabinsrc.h index ccc1ef6..9671b1a 100644 --- a/gst/camerabin2/gstwrappercamerabinsrc.h +++ b/gst/camerabin2/gstwrappercamerabinsrc.h @@ -58,7 +58,7 @@ enum GstVideoRecordingStatus { 
*/ struct _GstWrapperCameraBinSrc { - GstBaseCameraSrc parent; + GstBaseCameraBinSrc parent; GstCameraBinMode mode; @@ -121,7 +121,7 @@ struct _GstWrapperCameraBinSrc */ struct _GstWrapperCameraBinSrcClass { - GstBaseCameraSrcClass parent; + GstBaseCameraBinSrcClass parent; }; gboolean gst_wrapper_camera_bin_src_plugin_init (GstPlugin * plugin); diff --git a/gst/dvdspu/gstdvdspu-render.c b/gst/dvdspu/gstdvdspu-render.c index 7731aed..8e61124 100644 --- a/gst/dvdspu/gstdvdspu-render.c +++ b/gst/dvdspu/gstdvdspu-render.c @@ -85,10 +85,13 @@ gstspu_blend_comp_buffers (SpuState * state, guint8 * planes[3]) * inverse alpha is (4 * 0xff) - in_A[x] */ guint16 inv_A = (4 * 0xff) - in_A[x]; - tmp = in_U[x] + inv_A * out_U[x]; - out_U[x] = (guint8) (tmp / (4 * 0xff)); + tmp = in_U[x] + inv_A * *out_U; + *out_U = (guint8) (tmp / (4 * 0xff)); - tmp = in_V[x] + inv_A * out_V[x]; - out_V[x] = (guint8) (tmp / (4 * 0xff)); + tmp = in_V[x] + inv_A * *out_V; + *out_V = (guint8) (tmp / (4 * 0xff)); + + out_U += state->UV_pixstride; + out_V += state->UV_pixstride; } } diff --git a/gst/dvdspu/gstdvdspu.c b/gst/dvdspu/gstdvdspu.c index 89b63d8..4b20983 100644 --- a/gst/dvdspu/gstdvdspu.c +++ b/gst/dvdspu/gstdvdspu.c @@ -55,7 +55,7 @@ static GstStaticPadTemplate video_sink_factory = GST_STATIC_PAD_TEMPLATE ("video", GST_PAD_SINK, GST_PAD_ALWAYS, - GST_STATIC_CAPS ("video/x-raw-yuv, " "format = (fourcc) { I420 }, " + GST_STATIC_CAPS ("video/x-raw-yuv, " "format = (fourcc) { I420, NV12 }, " "width = (int) [ 16, 4096 ], " "height = (int) [ 16, 4096 ]") /* FIXME: Can support YV12 one day too */ ); @@ -63,7 +63,7 @@ GST_STATIC_PAD_TEMPLATE ("video", static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS, - GST_STATIC_CAPS ("video/x-raw-yuv, " "format = (fourcc) { I420 }, " + GST_STATIC_CAPS ("video/x-raw-yuv, " "format = (fourcc) { I420, NV12 }, " "width = (int) [ 16, 4096 ], " "height = (int) [ 16, 4096 ]") /* FIXME: Can support YV12 one day too 
*/ ); @@ -320,35 +320,52 @@ gst_dvd_spu_video_set_caps (GstPad * pad, GstCaps * caps) gint w, h; gint i; gint fps_n, fps_d; + guint32 format; + gboolean interlaced = FALSE; SpuState *state; s = gst_caps_get_structure (caps, 0); if (!gst_structure_get_int (s, "width", &w) || !gst_structure_get_int (s, "height", &h) || - !gst_structure_get_fraction (s, "framerate", &fps_n, &fps_d)) { + !gst_structure_get_fraction (s, "framerate", &fps_n, &fps_d) || + !gst_structure_get_fourcc (s, "format", &format)) { goto done; } + /* interlaced field is optional: */ + gst_structure_get_boolean (s, "interlaced", &interlaced); + DVD_SPU_LOCK (dvdspu); state = &dvdspu->spu_state; state->fps_n = fps_n; state->fps_d = fps_d; + state->interlaced = interlaced; state->vid_height = h; state->Y_height = GST_ROUND_UP_2 (h); state->UV_height = state->Y_height / 2; - if (state->vid_width != w) { - state->vid_width = w; - state->Y_stride = GST_ROUND_UP_4 (w); + state->vid_width = w; + state->Y_stride = GST_ROUND_UP_4 (w); + + state->U_offset = state->Y_height * state->Y_stride; + + if (format == GST_STR_FOURCC("NV12")) { + state->UV_stride = GST_ROUND_UP_4 (state->Y_stride); + state->V_offset = 1; + state->UV_pixstride = 2; + } else { state->UV_stride = GST_ROUND_UP_4 (state->Y_stride / 2); - for (i = 0; i < 3; i++) { - state->comp_bufs[i] = g_realloc (state->comp_bufs[i], - sizeof (guint32) * state->UV_stride); - } + state->V_offset = state->UV_stride * state->UV_height; + state->UV_pixstride = 1; + } + + for (i = 0; i < 3; i++) { + state->comp_bufs[i] = g_realloc (state->comp_bufs[i], + sizeof (guint32) * state->UV_stride); } DVD_SPU_UNLOCK (dvdspu); @@ -551,7 +568,7 @@ dvdspu_handle_vid_buffer (GstDVDSpu * dvdspu, GstBuffer * buf) goto no_ref_frame; } - buf = gst_buffer_copy (dvdspu->ref_frame); + buf = gst_buffer_ref (dvdspu->ref_frame); #if 0 g_print ("Duping frame %" GST_TIME_FORMAT " with new TS %" GST_TIME_FORMAT @@ -589,7 +606,7 @@ dvdspu_handle_vid_buffer (GstDVDSpu * dvdspu, GstBuffer 
* buf) /* Take a copy in case we hit a still frame and need the pristine * frame around */ - copy = gst_buffer_copy (buf); + copy = gst_buffer_ref (buf); gst_buffer_replace (&dvdspu->ref_frame, copy); gst_buffer_unref (copy); } diff --git a/gst/dvdspu/gstdvdspu.h b/gst/dvdspu/gstdvdspu.h index 22b48d1..9a2f4d8 100644 --- a/gst/dvdspu/gstdvdspu.h +++ b/gst/dvdspu/gstdvdspu.h @@ -73,6 +73,9 @@ struct SpuState { gint16 vid_width, vid_height; gint16 Y_stride, UV_stride; gint16 Y_height, UV_height; + gint32 U_offset, V_offset; + gint8 UV_pixstride; + gboolean interlaced; guint32 *comp_bufs[3]; /* Compositing buffers for U+V & A */ guint16 comp_left; diff --git a/gst/dvdspu/gstspu-common.h b/gst/dvdspu/gstspu-common.h index 206e882..494dab0 100644 --- a/gst/dvdspu/gstspu-common.h +++ b/gst/dvdspu/gstspu-common.h @@ -45,6 +45,7 @@ struct SpuColour { guint16 U; guint16 V; guint8 A; + guint8 vis; /* is this color visible? */ }; void gstspu_clear_comp_buffers (SpuState * state); diff --git a/gst/dvdspu/gstspu-pgs.c b/gst/dvdspu/gstspu-pgs.c index b860b29..ce6156d 100644 --- a/gst/dvdspu/gstspu-pgs.c +++ b/gst/dvdspu/gstspu-pgs.c @@ -194,12 +194,8 @@ pgs_composition_object_render (PgsCompositionObject * obj, SpuState * state, /* Store the start of each plane */ planes[0] = GST_BUFFER_DATA (dest_buf); - planes[1] = planes[0] + (state->Y_height * state->Y_stride); - planes[2] = planes[1] + (state->UV_height * state->UV_stride); - - /* Sanity check */ - g_return_if_fail (planes[2] + (state->UV_height * state->UV_stride) <= - GST_BUFFER_DATA (dest_buf) + GST_BUFFER_SIZE (dest_buf)); + planes[1] = planes[0] + state->U_offset; + planes[2] = planes[1] + state->V_offset; y = MIN (obj->y, state->Y_height); @@ -262,7 +258,7 @@ pgs_composition_object_render (PgsCompositionObject * obj, SpuState * state, } colour = &state->pgs.palette[pal_id]; - if (colour->A) { + if (colour->A && colour->vis) { guint32 inv_A = 0xff - colour->A; if (G_UNLIKELY (x + run_len > max_x)) run_len = (max_x - 
x); @@ -496,6 +492,8 @@ parse_set_palette (GstDVDSpu * dvdspu, guint8 type, guint8 * payload, state->pgs.palette[n].U = U * A; state->pgs.palette[n].V = V * A; state->pgs.palette[n].A = A; + state->pgs.palette[n].vis = state->pgs.palette[n].Y || + state->pgs.palette[n].U || state->pgs.palette[n].V; payload += PGS_PALETTE_ENTRY_SIZE; } diff --git a/gst/dvdspu/gstspu-vobsub-render.c b/gst/dvdspu/gstspu-vobsub-render.c index 830017d..ad8d96b 100644 --- a/gst/dvdspu/gstspu-vobsub-render.c +++ b/gst/dvdspu/gstspu-vobsub-render.c @@ -47,6 +47,7 @@ gstspu_vobsub_recalc_palette (GstDVDSpu * dvdspu, /* U/V are stored as V/U in the clut words, so switch them */ dest->V = ((guint16) ((col >> 8) & 0xff)) * dest->A; dest->U = ((guint16) (col & 0xff)) * dest->A; + dest->vis = dest->Y || dest->V || dest->U; } } else { int y = 240; @@ -168,7 +169,7 @@ gstspu_vobsub_get_rle_code (SpuState * state, guint16 * rle_offset) return code; } -static inline void +static inline gboolean gstspu_vobsub_draw_rle_run (SpuState * state, gint16 x, gint16 end, SpuColour * colour) { @@ -177,7 +178,7 @@ gstspu_vobsub_draw_rle_run (SpuState * state, gint16 x, gint16 end, state->vobsub.cur_Y, x, end, colour->Y, colour->U, colour->V, colour->A); #endif - if (colour->A != 0) { + if ((colour->A != 0) && colour->vis) { guint32 inv_A = 0xff - colour->A; /* FIXME: This could be more efficient */ @@ -191,7 +192,10 @@ gstspu_vobsub_draw_rle_run (SpuState * state, gint16 x, gint16 end, } /* Update the compositing buffer so we know how much to blend later */ *(state->vobsub.comp_last_x_ptr) = end - 1; /* end is the start of the *next* run */ + + return TRUE; } + return FALSE; } static inline gint16 @@ -204,16 +208,17 @@ rle_end_x (guint16 rle_code, gint16 x, gint16 end) return MIN (end, x + (rle_code >> 2)); } -static void gstspu_vobsub_render_line_with_chgcol (SpuState * state, +static gboolean gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3], guint16 * rle_offset); static gboolean 
gstspu_vobsub_update_chgcol (SpuState * state); -static void +static gboolean gstspu_vobsub_render_line (SpuState * state, guint8 * planes[3], guint16 * rle_offset) { gint16 x, next_x, end, rle_code, next_draw_x; SpuColour *colour; + gboolean visible = FALSE; /* Check for special case of chg_col info to use (either highlight or * ChgCol command */ @@ -222,8 +227,7 @@ gstspu_vobsub_render_line (SpuState * state, guint8 * planes[3], /* Check the top & bottom, because we might not be within the region yet */ if (state->vobsub.cur_Y >= state->vobsub.cur_chg_col->top && state->vobsub.cur_Y <= state->vobsub.cur_chg_col->bottom) { - gstspu_vobsub_render_line_with_chgcol (state, planes, rle_offset); - return; + return gstspu_vobsub_render_line_with_chgcol (state, planes, rle_offset); } } } @@ -248,9 +252,11 @@ gstspu_vobsub_render_line (SpuState * state, guint8 * planes[3], if (next_draw_x > state->vobsub.clip_rect.right) next_draw_x = state->vobsub.clip_rect.right; /* ensure no overflow */ /* Now draw the run between [x,next_x) */ - gstspu_vobsub_draw_rle_run (state, x, next_draw_x, colour); + visible |= gstspu_vobsub_draw_rle_run (state, x, next_draw_x, colour); x = next_x; } + + return visible; } static gboolean @@ -280,7 +286,7 @@ gstspu_vobsub_update_chgcol (SpuState * state) return FALSE; } -static void +static gboolean gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3], guint16 * rle_offset) { @@ -292,6 +298,7 @@ gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3], SpuVobsubPixCtrlI *next_pix_ctrl; SpuVobsubPixCtrlI *end_pix_ctrl; SpuVobsubPixCtrlI dummy_pix_ctrl; + gboolean visible = FALSE; gint16 cur_reg_end; gint i; @@ -340,7 +347,7 @@ gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3], if (G_LIKELY (x < run_end)) { colour = &cur_pix_ctrl->pal_cache[rle_code & 3]; - gstspu_vobsub_draw_rle_run (state, x, run_draw_end, colour); + visible |= gstspu_vobsub_draw_rle_run (state, x, run_draw_end, 
colour); x = run_end; } @@ -356,6 +363,8 @@ gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3], } } } + + return visible; } static void @@ -383,26 +392,135 @@ gstspu_vobsub_clear_comp_buffers (SpuState * state) state->vobsub.comp_last_x[1] = -1; } +static inline gint +ilaced_y (SpuState * state, gint y) +{ + if (state->interlaced) { + if (y % 2) { + /* odd field */ + y = (y + state->Y_height) / 2; + } else { + /* even field */ + y = y / 2; + } + } + return y; +} + +static void +gstspu_vobsub_draw_highlight (SpuState * state, + GstBuffer * buf, SpuRect *rect) +{ + guint8 *cur; + gint16 pos; + + cur = GST_BUFFER_DATA (buf) + state->Y_stride * + ilaced_y (state, rect->top); + for (pos = rect->left + 1; pos < rect->right; pos++) + cur[pos] = (cur[pos] / 2) + 0x8; + cur = GST_BUFFER_DATA (buf) + state->Y_stride * + ilaced_y (state, rect->bottom); + for (pos = rect->left + 1; pos < rect->right; pos++) + cur[pos] = (cur[pos] / 2) + 0x8; + for (pos = rect->top; pos <= rect->bottom; pos++) { + cur = GST_BUFFER_DATA (buf) + state->Y_stride * + ilaced_y (state, pos); + cur[rect->left] = (cur[rect->left] / 2) + 0x8; + cur[rect->right] = (cur[rect->right] / 2) + 0x8; + } +} + +static void +gstspu_vobsub_render_field (SpuState * state, GstBuffer * buf, + gint y, gint last_y, gint y_inc, + guint16 * rle_offset_even, guint16 * rle_offset_odd) +{ + guint8 *planes[3]; /* YUV frame pointers */ + + /* Store the start of each plane */ + planes[0] = GST_BUFFER_DATA (buf); + planes[1] = planes[0] + state->U_offset; + planes[2] = planes[1] + state->V_offset; + + /* Update our plane references to the first line of the disp_rect */ + planes[0] += state->Y_stride * y; + planes[1] += state->UV_stride * (y / 2); + planes[2] += state->UV_stride * (y / 2); + + /* Set up HL or Change Color & Contrast rect tracking */ + if (state->vobsub.hl_rect.top != -1) { + state->vobsub.cur_chg_col = &state->vobsub.hl_ctrl_i; + state->vobsub.cur_chg_col_end = state->vobsub.cur_chg_col 
+ 1; + } else if (state->vobsub.n_line_ctrl_i > 0) { + state->vobsub.cur_chg_col = state->vobsub.line_ctrl_i; + state->vobsub.cur_chg_col_end = + state->vobsub.cur_chg_col + state->vobsub.n_line_ctrl_i; + } else + state->vobsub.cur_chg_col = NULL; + + for (state->vobsub.cur_Y = y; state->vobsub.cur_Y <= last_y; + state->vobsub.cur_Y += y_inc) { + gboolean clip, visible = FALSE; + + clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top + || state->vobsub.cur_Y > state->vobsub.clip_rect.bottom); + + /* Reset the compositing buffer */ + gstspu_vobsub_clear_comp_buffers (state); + /* Render even line */ + state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x; + visible |= gstspu_vobsub_render_line (state, planes, rle_offset_even); + + /* Advance the luminance output pointer */ + planes[0] += state->Y_stride; + + state->vobsub.cur_Y += y_inc; + + /* Render odd line */ + state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x + 1; + visible |= gstspu_vobsub_render_line (state, planes, rle_offset_odd); + + if (visible && !clip) { + /* Blend the accumulated UV compositing buffers onto the output */ + gstspu_vobsub_blend_comp_buffers (state, planes); + } + + /* Update all the output pointers */ + planes[0] += state->Y_stride; + planes[1] += state->UV_stride; + planes[2] += state->UV_stride; + } + if (state->vobsub.cur_Y == state->vobsub.disp_rect.bottom) { + gboolean clip, visible = FALSE; + + clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top + || state->vobsub.cur_Y > state->vobsub.clip_rect.bottom); + + g_assert ((state->vobsub.disp_rect.bottom & 0x01) == 0); + + if (!clip) { + /* Render a remaining lone last even line. y already has the correct value + * after the above loop exited. 
*/ + gstspu_vobsub_clear_comp_buffers (state); + state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x; + visible |= gstspu_vobsub_render_line (state, planes, rle_offset_even); + if (visible) + gstspu_vobsub_blend_comp_buffers (state, planes); + } + } + +} + void gstspu_vobsub_render (GstDVDSpu * dvdspu, GstBuffer * buf) { SpuState *state = &dvdspu->spu_state; - guint8 *planes[3]; /* YUV frame pointers */ gint y, last_y; /* Set up our initial state */ if (G_UNLIKELY (state->vobsub.pix_buf == NULL)) return; - /* Store the start of each plane */ - planes[0] = GST_BUFFER_DATA (buf); - planes[1] = planes[0] + (state->Y_height * state->Y_stride); - planes[2] = planes[1] + (state->UV_height * state->UV_stride); - - /* Sanity check */ - g_return_if_fail (planes[2] + (state->UV_height * state->UV_stride) <= - GST_BUFFER_DATA (buf) + GST_BUFFER_SIZE (buf)); - GST_DEBUG_OBJECT (dvdspu, "Rendering SPU. disp_rect %d,%d to %d,%d. hl_rect %d,%d to %d,%d", state->vobsub.disp_rect.left, state->vobsub.disp_rect.top, @@ -421,17 +539,6 @@ gstspu_vobsub_render (GstDVDSpu * dvdspu, GstBuffer * buf) /* Update all the palette caches */ gstspu_vobsub_update_palettes (dvdspu, state); - /* Set up HL or Change Color & Contrast rect tracking */ - if (state->vobsub.hl_rect.top != -1) { - state->vobsub.cur_chg_col = &state->vobsub.hl_ctrl_i; - state->vobsub.cur_chg_col_end = state->vobsub.cur_chg_col + 1; - } else if (state->vobsub.n_line_ctrl_i > 0) { - state->vobsub.cur_chg_col = state->vobsub.line_ctrl_i; - state->vobsub.cur_chg_col_end = - state->vobsub.cur_chg_col + state->vobsub.n_line_ctrl_i; - } else - state->vobsub.cur_chg_col = NULL; - state->vobsub.clip_rect.left = state->vobsub.disp_rect.left; state->vobsub.clip_rect.right = state->vobsub.disp_rect.right; @@ -507,106 +614,22 @@ gstspu_vobsub_render (GstDVDSpu * dvdspu, GstBuffer * buf) * single line at the end if the display rect ends on an even line too. 
*/ last_y = (state->vobsub.disp_rect.bottom - 1) & ~(0x01); - /* Update our plane references to the first line of the disp_rect */ - planes[0] += state->Y_stride * y; - planes[1] += state->UV_stride * (y / 2); - planes[2] += state->UV_stride * (y / 2); - - for (state->vobsub.cur_Y = y; state->vobsub.cur_Y <= last_y; - state->vobsub.cur_Y++) { - gboolean clip; - - clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top - || state->vobsub.cur_Y > state->vobsub.clip_rect.bottom); - - /* Reset the compositing buffer */ - gstspu_vobsub_clear_comp_buffers (state); - /* Render even line */ - state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x; - gstspu_vobsub_render_line (state, planes, &state->vobsub.cur_offsets[0]); - if (!clip) { - /* Advance the luminance output pointer */ - planes[0] += state->Y_stride; - } - state->vobsub.cur_Y++; - - /* Render odd line */ - state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x + 1; - gstspu_vobsub_render_line (state, planes, &state->vobsub.cur_offsets[1]); - - if (!clip) { - /* Blend the accumulated UV compositing buffers onto the output */ - gstspu_vobsub_blend_comp_buffers (state, planes); - - /* Update all the output pointers */ - planes[0] += state->Y_stride; - planes[1] += state->UV_stride; - planes[2] += state->UV_stride; - } - } - if (state->vobsub.cur_Y == state->vobsub.disp_rect.bottom) { - gboolean clip; - - clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top - || state->vobsub.cur_Y > state->vobsub.clip_rect.bottom); - - g_assert ((state->vobsub.disp_rect.bottom & 0x01) == 0); - - if (!clip) { - /* Render a remaining lone last even line. y already has the correct value - * after the above loop exited. 
*/ - gstspu_vobsub_clear_comp_buffers (state); - state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x; - gstspu_vobsub_render_line (state, planes, &state->vobsub.cur_offsets[0]); - gstspu_vobsub_blend_comp_buffers (state, planes); - } + if (state->interlaced) { + gstspu_vobsub_render_field (state, buf, y, last_y, 2, + &state->vobsub.cur_offsets[0], &state->vobsub.cur_offsets[0]); + gstspu_vobsub_render_field (state, buf, y+1, last_y, 2, + &state->vobsub.cur_offsets[1], &state->vobsub.cur_offsets[1]); + } else { + gstspu_vobsub_render_field (state, buf, y, last_y, 1, + &state->vobsub.cur_offsets[0], &state->vobsub.cur_offsets[1]); } /* for debugging purposes, draw a faint rectangle at the edges of the disp_rect */ -#if 0 - do { - guint8 *cur; - gint16 pos; - - cur = GST_BUFFER_DATA (buf) + state->Y_stride * state->vobsub.disp_rect.top; - for (pos = state->vobsub.disp_rect.left + 1; - pos < state->vobsub.disp_rect.right; pos++) - cur[pos] = (cur[pos] / 2) + 0x8; - cur = - GST_BUFFER_DATA (buf) + - state->Y_stride * state->vobsub.disp_rect.bottom; - for (pos = state->vobsub.disp_rect.left + 1; - pos < state->vobsub.disp_rect.right; pos++) - cur[pos] = (cur[pos] / 2) + 0x8; - cur = GST_BUFFER_DATA (buf) + state->Y_stride * state->vobsub.disp_rect.top; - for (pos = state->vobsub.disp_rect.top; - pos <= state->vobsub.disp_rect.bottom; pos++) { - cur[state->vobsub.disp_rect.left] = - (cur[state->vobsub.disp_rect.left] / 2) + 0x8; - cur[state->vobsub.disp_rect.right] = - (cur[state->vobsub.disp_rect.right] / 2) + 0x8; - cur += state->Y_stride; - } - } while (0); -#endif + if (FALSE) { + gstspu_vobsub_draw_highlight (state, buf, &state->vobsub.disp_rect); + } /* For debugging purposes, draw a faint rectangle around the highlight rect */ -#if 0 - if (state->hl_rect.top != -1) { - guint8 *cur; - gint16 pos; - - cur = GST_BUFFER_DATA (buf) + state->Y_stride * state->hl_rect.top; - for (pos = state->hl_rect.left + 1; pos < state->hl_rect.right; pos++) - cur[pos] = 
(cur[pos] / 2) + 0x8; - cur = GST_BUFFER_DATA (buf) + state->Y_stride * state->hl_rect.bottom; - for (pos = state->hl_rect.left + 1; pos < state->hl_rect.right; pos++) - cur[pos] = (cur[pos] / 2) + 0x8; - cur = GST_BUFFER_DATA (buf) + state->Y_stride * state->hl_rect.top; - for (pos = state->hl_rect.top; pos <= state->hl_rect.bottom; pos++) { - cur[state->hl_rect.left] = (cur[state->hl_rect.left] / 2) + 0x8; - cur[state->hl_rect.right] = (cur[state->hl_rect.right] / 2) + 0x8; - cur += state->Y_stride; - } + if (FALSE && state->vobsub.hl_rect.top != -1) { + gstspu_vobsub_draw_highlight (state, buf, &state->vobsub.hl_rect); } -#endif } diff --git a/gst/freeze/gstfreeze.c b/gst/freeze/gstfreeze.c index 0d63eb7..1885d8c 100644 --- a/gst/freeze/gstfreeze.c +++ b/gst/freeze/gstfreeze.c @@ -44,6 +44,7 @@ enum { ARG_0, ARG_MAX_BUFFERS, + ARG_TIMEOUT, }; static GstStaticPadTemplate gst_freeze_src_template = @@ -74,6 +75,8 @@ static gboolean gst_freeze_sink_activate_pull (GstPad * sinkpad, static gboolean gst_freeze_sink_event (GstPad * pad, GstEvent * event); static void gst_freeze_clear_buffer (GstFreeze * freeze); static void gst_freeze_buffer_free (gpointer data, gpointer user_data); +static void gst_freeze_set_timeout (gpointer data); +static gboolean gst_freeze_finish_stream (gpointer data); GST_BOILERPLATE (GstFreeze, gst_freeze, GstElement, GST_TYPE_ELEMENT); @@ -113,6 +116,18 @@ gst_freeze_class_init (GstFreezeClass * klass) "Maximum number of buffers", 0, G_MAXUINT, 1, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + g_object_class_install_property (object_class, + ARG_TIMEOUT, + g_param_spec_int ("timeout", + "timeout", + "Timeout before closing stream", 0, G_MAXINT, 1, G_PARAM_READWRITE)); + + g_object_class_install_property (object_class, + ARG_TIMEOUT, + g_param_spec_int ("timeout", + "timeout", + "Timeout before closing stream", 0, G_MAXINT, 1, G_PARAM_READWRITE)); + object_class->dispose = gst_freeze_dispose; } @@ -165,6 +180,10 @@ gst_freeze_set_property 
(GObject * object, guint prop_id, case ARG_MAX_BUFFERS: freeze->max_buffers = g_value_get_uint (value); break; + case ARG_TIMEOUT: + freeze->timeout = g_value_get_int (value); + g_print ("set_property, timeout=%d\n", freeze->timeout); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; @@ -181,6 +200,9 @@ gst_freeze_get_property (GObject * object, guint prop_id, case ARG_MAX_BUFFERS: g_value_set_uint (value, freeze->max_buffers); break; + case ARG_TIMEOUT: + g_value_set_int (value, freeze->timeout); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; @@ -206,6 +228,7 @@ gst_freeze_change_state (GstElement * element, GstStateChange transition) case GST_STATE_CHANGE_NULL_TO_READY: case GST_STATE_CHANGE_PAUSED_TO_PLAYING: freeze->timestamp_offset = freeze->running_time = 0; + gst_freeze_set_timeout (freeze); break; default: break; @@ -369,6 +392,29 @@ gst_freeze_sink_event (GstPad * pad, GstEvent * event) } static gboolean +gst_freeze_finish_stream (gpointer data) +{ + GstFreeze *freeze = GST_FREEZE (data); + GstState cur_state; + + gst_element_get_state (GST_ELEMENT (freeze), &cur_state, NULL, 0); + if (cur_state != GST_STATE_PLAYING) + return TRUE; + + gst_pad_push_event (freeze->srcpad, gst_event_new_eos ()); + return FALSE; +} + +static void +gst_freeze_set_timeout (gpointer data) +{ + GstFreeze *freeze = GST_FREEZE (data); + + if (freeze->timeout > 0) + g_timeout_add (freeze->timeout * 1000, gst_freeze_finish_stream, freeze); +} + +static gboolean plugin_init (GstPlugin * plugin) { GST_DEBUG_CATEGORY_INIT (freeze_debug, "freeze", 0, "Stream freezer"); diff --git a/gst/freeze/gstfreeze.h b/gst/freeze/gstfreeze.h index 47cead7..1d8715b 100644 --- a/gst/freeze/gstfreeze.h +++ b/gst/freeze/gstfreeze.h @@ -43,6 +43,7 @@ struct _GstFreeze GstBuffer *current; guint max_buffers; + gint timeout; gint64 timestamp_offset; gint64 offset; diff --git a/gst/h264parse/gsth264parse.c b/gst/h264parse/gsth264parse.c 
index a73522b..775ce37 100644 --- a/gst/h264parse/gsth264parse.c +++ b/gst/h264parse/gsth264parse.c @@ -1384,6 +1384,7 @@ gst_h264_parse_sink_setcaps (GstPad * pad, GstCaps * caps) { GstH264Parse *h264parse; GstStructure *str; + GstCaps *src_caps = NULL; const GValue *value; guint8 *data; guint size, num_sps, num_pps; @@ -1398,6 +1399,25 @@ gst_h264_parse_sink_setcaps (GstPad * pad, GstCaps * caps) gst_structure_get_fraction (str, "framerate", &h264parse->fps_num, &h264parse->fps_den); + /****** WORKAROUND **********/ + /* don't treat codec-data as an AVC format codec-data, but treat it like a + * normal byte-stream buffer, and handle like bytestream. + */ + /* need to remove the codec_data */ + if (G_UNLIKELY (h264parse->src_caps == NULL)) { + src_caps = gst_caps_copy (caps); + } else { + src_caps = gst_caps_ref (h264parse->src_caps); + } + src_caps = gst_caps_make_writable (src_caps); + g_return_val_if_fail (src_caps != NULL, FALSE); + str = gst_caps_get_structure (src_caps, 0); + if (gst_structure_has_field (str, "codec_data")) { + gst_structure_remove_field (str, "codec_data"); + gst_caps_replace (&h264parse->src_caps, src_caps); + } + /******** END WORKAROUND ************/ + /* packetized video has a codec_data */ if ((value = gst_structure_get_value (str, "codec_data"))) { GstBuffer *buffer; @@ -1479,6 +1499,7 @@ gst_h264_parse_sink_setcaps (GstPad * pad, GstCaps * caps) /* we have 4 sync bytes */ h264parse->nal_length_size = 4; } + gst_caps_unref (src_caps); /* forward the caps */ return gst_h264_parse_update_src_caps (h264parse, caps); diff --git a/gst/h264parse/gsth264parse.h b/gst/h264parse/gsth264parse.h index e245ea0..e2fae84 100644 --- a/gst/h264parse/gsth264parse.h +++ b/gst/h264parse/gsth264parse.h @@ -48,7 +48,7 @@ typedef struct _GstH264Sps GstH264Sps; typedef struct _GstH264Pps GstH264Pps; #define MAX_SPS_COUNT 32 -#define MAX_PPS_COUNT 32 +#define MAX_PPS_COUNT 256 #define CLOCK_BASE 9LL #define CLOCK_FREQ (CLOCK_BASE * 10000) diff --git 
a/gst/jpegformat/gstjpegformat.h b/gst/jpegformat/gstjpegformat.h index 118dd7a..41df961 100644 --- a/gst/jpegformat/gstjpegformat.h +++ b/gst/jpegformat/gstjpegformat.h @@ -77,6 +77,7 @@ G_BEGIN_DECLS #define APP0 0xe0 /* Application marker */ #define APP1 0xe1 #define APP2 0xe2 +#define APP12 0xec /* "Picture Info" used by old cameras */ #define APP13 0xed #define APP14 0xee #define APP15 0xef diff --git a/gst/mpeg4videoparse/Makefile.am b/gst/mpeg4videoparse/Makefile.am index e0c4303..8259ed5 100644 --- a/gst/mpeg4videoparse/Makefile.am +++ b/gst/mpeg4videoparse/Makefile.am @@ -1,13 +1,13 @@ plugin_LTLIBRARIES = libgstmpeg4videoparse.la -libgstmpeg4videoparse_la_SOURCES = mpeg4videoparse.c -libgstmpeg4videoparse_la_CFLAGS = $(GST_CFLAGS) +libgstmpeg4videoparse_la_SOURCES = mpeg4videoparse.c mpeg4parse.c +libgstmpeg4videoparse_la_CFLAGS = $(GST_BASE_CFLAGS) $(GST_CFLAGS) libgstmpeg4videoparse_la_LIBADD = $(GST_BASE_LIBS) $(GST_LIBS) libgstmpeg4videoparse_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS) libgstmpeg4videoparse_la_LIBTOOLFLAGS = --tag=disable-static -noinst_HEADERS = mpeg4videoparse.h +noinst_HEADERS = mpeg4videoparse.h mpeg4parse.h Android.mk: Makefile.am $(BUILT_SOURCES) androgenizer \ diff --git a/gst/mpeg4videoparse/mpeg4parse.c b/gst/mpeg4videoparse/mpeg4parse.c new file mode 100644 index 0000000..d386ac9 --- /dev/null +++ b/gst/mpeg4videoparse/mpeg4parse.c @@ -0,0 +1,291 @@ +/* GStreamer MPEG4-2 video Parser + * Copyright (C) <2008> Mindfruit B.V. + * @author Sjoerd Simons <sjoerd@luon.net> + * Copyright (C) <2007> Julien Moutte <julien@fluendo.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "mpeg4parse.h" + +#include <gst/base/gstbitreader.h> + +GST_DEBUG_CATEGORY_EXTERN (mpeg4v_parse_debug); +#define GST_CAT_DEFAULT mpeg4v_parse_debug + + +#define GET_BITS(b, num, bits) G_STMT_START { \ + if (!gst_bit_reader_get_bits_uint32(b, bits, num)) \ + goto failed; \ +} G_STMT_END + +#define MARKER_BIT(b) G_STMT_START { \ + guint32 i; \ + GET_BITS(b, 1, &i); \ + if (i != 0x1) \ + goto failed; \ +} G_STMT_END + +static inline gboolean +next_start_code (GstBitReader * b) +{ + guint32 bits = 0; + + GET_BITS (b, 1, &bits); + if (bits != 0) + goto failed; + + while (b->bit != 0) { + GET_BITS (b, 1, &bits); + if (bits != 0x1) + goto failed; + } + + return TRUE; + +failed: + return FALSE; +} + +static inline gboolean +skip_user_data (GstBitReader * bs, guint32 * bits) +{ + while (*bits == MPEG4_USER_DATA_STARTCODE_MARKER) { + guint32 b = 0; + + do { + GET_BITS (bs, 8, &b); + *bits = (*bits << 8) | b; + } while ((*bits >> 8) != MPEG4_START_MARKER); + } + + return TRUE; + +failed: + return FALSE; +} + + +static gint aspect_ratio_table[6][2] = { + {-1, -1}, {1, 1}, {12, 11}, {10, 11}, {16, 11}, {40, 33} +}; + +static gboolean +gst_mpeg4_params_parse_vo (MPEG4Params * params, GstBitReader * br) +{ + guint32 bits; + guint16 time_increment_resolution = 0; + guint16 fixed_time_increment = 0; + gint aspect_ratio_width = -1, aspect_ratio_height = -1; + gint height = -1, width = -1; + + /* expecting a video object startcode */ 
+ GET_BITS (br, 32, &bits); + if (bits > 0x11F) + goto failed; + + /* expecting a video object layer startcode */ + GET_BITS (br, 32, &bits); + if (bits < 0x120 || bits > 0x12F) + goto failed; + + /* ignore random accessible vol and video object type indication */ + GET_BITS (br, 9, &bits); + + GET_BITS (br, 1, &bits); + if (bits) { + /* skip video object layer verid and priority */ + GET_BITS (br, 7, &bits); + } + + /* aspect ratio info */ + GET_BITS (br, 4, &bits); + if (bits == 0) + goto failed; + + /* check if aspect ratio info is extended par */ + if (bits == 0xf) { + GET_BITS (br, 8, &bits); + aspect_ratio_width = bits; + GET_BITS (br, 8, &bits); + aspect_ratio_height = bits; + } else if (bits < 0x6) { + aspect_ratio_width = aspect_ratio_table[bits][0]; + aspect_ratio_height = aspect_ratio_table[bits][1]; + } + + GET_BITS (br, 1, &bits); + if (bits) { + /* vol control parameters, skip chroma and low delay */ + GET_BITS (br, 3, &bits); + GET_BITS (br, 1, &bits); + if (bits) { + /* skip vbv_parameters */ + if (!gst_bit_reader_skip (br, 79)) + goto failed; + } + } + + /* layer shape */ + GET_BITS (br, 2, &bits); + /* only support rectangular */ + if (bits != 0) + goto failed; + + MARKER_BIT (br); + GET_BITS (br, 16, &bits); + time_increment_resolution = bits; + MARKER_BIT (br); + + GST_DEBUG ("time increment resolution %d", time_increment_resolution); + + GET_BITS (br, 1, &bits); + if (bits) { + /* fixed time increment */ + int n; + + /* Length of the time increment is the minimal number of bits needed to + * represent time_increment_resolution */ + for (n = 0; (time_increment_resolution >> n) != 0; n++); + GET_BITS (br, n, &bits); + + fixed_time_increment = bits; + } else { + /* When fixed_vop_rate is not set we can't guess any framerate */ + fixed_time_increment = 0; + } + GST_DEBUG ("fixed time increment %d", fixed_time_increment); + + /* assuming rectangular shape */ + MARKER_BIT (br); + GET_BITS (br, 13, &bits); + width = bits; + MARKER_BIT (br); + GET_BITS 
(br, 13, &bits); + height = bits; + MARKER_BIT (br); + + /* so we got it all, report back */ + params->width = width; + params->height = height; + params->time_increment_resolution = time_increment_resolution; + params->fixed_time_increment = fixed_time_increment; + params->aspect_ratio_width = aspect_ratio_width; + params->aspect_ratio_height = aspect_ratio_height; + + return TRUE; + + /* ERRORS */ +failed: + { + GST_WARNING ("Failed to parse config data"); + return FALSE; + } +} + +static gboolean +gst_mpeg4_params_parse_vos (MPEG4Params * params, GstBitReader * br) +{ + guint32 bits = 0; + + GET_BITS (br, 32, &bits); + if (bits != MPEG4_VOS_STARTCODE_MARKER) + goto failed; + + GET_BITS (br, 8, &bits); + params->profile = bits; + + /* invalid profile, warn but carry on */ + if (params->profile == 0) { + GST_WARNING ("Invalid profile in VOS"); + } + + /* Expect Visual Object startcode */ + GET_BITS (br, 32, &bits); + + /* but skip optional user data */ + if (!skip_user_data (br, &bits)) + goto failed; + + if (bits != MPEG4_VISUAL_OBJECT_STARTCODE_MARKER) + goto failed; + + GET_BITS (br, 1, &bits); + if (bits == 0x1) { + /* Skip visual_object_verid and priority */ + GET_BITS (br, 7, &bits); + } + + GET_BITS (br, 4, &bits); + /* Only support video ID */ + if (bits != 0x1) + goto failed; + + /* video signal type */ + GET_BITS (br, 1, &bits); + + if (bits == 0x1) { + /* video signal type, ignore format and range */ + GET_BITS (br, 4, &bits); + + GET_BITS (br, 1, &bits); + if (bits == 0x1) { + /* ignore color description */ + GET_BITS (br, 24, &bits); + } + } + + if (!next_start_code (br)) + goto failed; + + /* skip optional user data */ + GET_BITS (br, 32, &bits); + if (!skip_user_data (br, &bits)) + goto failed; + + /* rewind to start code */ + gst_bit_reader_set_pos (br, gst_bit_reader_get_pos (br) - 32); + + return gst_mpeg4_params_parse_vo (params, br); + + /* ERRORS */ +failed: + { + GST_WARNING ("Failed to parse config data"); + return FALSE; + } +} + +gboolean 
+gst_mpeg4_params_parse_config (MPEG4Params * params, const guint8 * data, + guint size) +{ + GstBitReader br; + + if (size < 4) + return FALSE; + + gst_bit_reader_init (&br, data, size); + + if (data[3] == MPEG4_VOS_STARTCODE) + return gst_mpeg4_params_parse_vos (params, &br); + else + return gst_mpeg4_params_parse_vo (params, &br); +} diff --git a/gst/mpeg4videoparse/mpeg4parse.h b/gst/mpeg4videoparse/mpeg4parse.h new file mode 100644 index 0000000..cf79e88 --- /dev/null +++ b/gst/mpeg4videoparse/mpeg4parse.h @@ -0,0 +1,63 @@ +/* GStreamer MPEG4-2 video Parser + * Copyright (C) <2008> Mindfruit B.V. + * @author Sjoerd Simons <sjoerd@luon.net> + * Copyright (C) <2007> Julien Moutte <julien@fluendo.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __GST_MPEG4_PARAMS_H__ +#define __GST_MPEG4_PARAMS_H__ + +#include <gst/gst.h> + +G_BEGIN_DECLS + +#define MPEG4_VIDEO_OBJECT_STARTCODE_MIN 0x00 +#define MPEG4_VIDEO_OBJECT_STARTCODE_MAX 0x1F +#define MPEG4_VOS_STARTCODE 0xB0 +#define MPEG4_VOS_ENDCODE 0xB1 +#define MPEG4_USER_DATA_STARTCODE 0xB2 +#define MPEG4_GOP_STARTCODE 0xB3 +#define MPEG4_VISUAL_OBJECT_STARTCODE 0xB5 +#define MPEG4_VOP_STARTCODE 0xB6 + +#define MPEG4_START_MARKER 0x000001 +#define MPEG4_VISUAL_OBJECT_STARTCODE_MARKER \ + ((MPEG4_START_MARKER << 8) + MPEG4_VISUAL_OBJECT_STARTCODE) +#define MPEG4_VOS_STARTCODE_MARKER \ + ((MPEG4_START_MARKER << 8) + MPEG4_VOS_STARTCODE) +#define MPEG4_USER_DATA_STARTCODE_MARKER \ + ((MPEG4_START_MARKER << 8) + MPEG4_USER_DATA_STARTCODE) + + +typedef struct _MPEG4Params MPEG4Params; + +struct _MPEG4Params +{ + gint profile; + + gint width, height; + gint aspect_ratio_width, aspect_ratio_height; + gint time_increment_resolution; + gint fixed_time_increment; +}; + +GstFlowReturn gst_mpeg4_params_parse_config (MPEG4Params * params, + const guint8 * data, guint size); + +G_END_DECLS +#endif diff --git a/gst/mpeg4videoparse/mpeg4videoparse.c b/gst/mpeg4videoparse/mpeg4videoparse.c index 254db9f..a63b03f 100644 --- a/gst/mpeg4videoparse/mpeg4videoparse.c +++ b/gst/mpeg4videoparse/mpeg4videoparse.c @@ -2,6 +2,9 @@ * Copyright (C) <2008> Mindfruit B.V. 
* @author Sjoerd Simons <sjoerd@luon.net> * Copyright (C) <2007> Julien Moutte <julien@fluendo.com> + * Copyright (C) <2011> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> + * Copyright (C) <2011> Collabora Multimedia + * Copyright (C) <2011> Nokia Corporation * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public @@ -24,9 +27,10 @@ #endif #include <string.h> +#include <gst/base/gstbytereader.h> #include "mpeg4videoparse.h" -GST_DEBUG_CATEGORY_STATIC (mpeg4v_parse_debug); +GST_DEBUG_CATEGORY (mpeg4v_parse_debug); #define GST_CAT_DEFAULT mpeg4v_parse_debug static GstStaticPadTemplate src_template = @@ -57,1024 +61,548 @@ enum PROP_LAST }; -GST_BOILERPLATE (GstMpeg4VParse, gst_mpeg4vparse, GstElement, GST_TYPE_ELEMENT); +GST_BOILERPLATE (GstMpeg4VParse, gst_mpeg4vparse, GstBaseParse, + GST_TYPE_BASE_PARSE); -static gboolean -gst_mpeg4vparse_set_new_caps (GstMpeg4VParse * parse, - guint16 time_increment_resolution, guint16 fixed_time_increment, - gint aspect_ratio_width, gint aspect_ratio_height, gint width, gint height) -{ - gboolean res; - GstCaps *out_caps; - - if (parse->sink_caps) { - out_caps = gst_caps_copy (parse->sink_caps); - } else { - out_caps = gst_caps_new_simple ("video/mpeg", - "mpegversion", G_TYPE_INT, 4, NULL); - } - gst_caps_set_simple (out_caps, "systemstream", G_TYPE_BOOLEAN, FALSE, - "parsed", G_TYPE_BOOLEAN, TRUE, NULL); +static gboolean gst_mpeg4vparse_start (GstBaseParse * parse); +static gboolean gst_mpeg4vparse_stop (GstBaseParse * parse); +static gboolean gst_mpeg4vparse_check_valid_frame (GstBaseParse * parse, + GstBaseParseFrame * frame, guint * framesize, gint * skipsize); +static GstFlowReturn gst_mpeg4vparse_parse_frame (GstBaseParse * parse, + GstBaseParseFrame * frame); +static GstFlowReturn gst_mpeg4vparse_pre_push_frame (GstBaseParse * parse, + GstBaseParseFrame * frame); +static gboolean gst_mpeg4vparse_set_caps (GstBaseParse * parse, GstCaps * caps); - 
if (parse->profile != 0) { - gchar *profile = NULL; +static void gst_mpeg4vparse_set_property (GObject * object, guint prop_id, + const GValue * value, GParamSpec * pspec); +static void gst_mpeg4vparse_get_property (GObject * object, guint prop_id, + GValue * value, GParamSpec * pspec); - /* FIXME does it make sense to expose the profile in the caps ? */ - profile = g_strdup_printf ("%d", parse->profile); - gst_caps_set_simple (out_caps, "profile-level-id", - G_TYPE_STRING, profile, NULL); - g_free (profile); - } - - if (parse->config != NULL) { - gst_caps_set_simple (out_caps, "codec_data", - GST_TYPE_BUFFER, parse->config, NULL); - } - - if (fixed_time_increment != 0) { - /* we have a framerate */ - gst_caps_set_simple (out_caps, "framerate", - GST_TYPE_FRACTION, time_increment_resolution, fixed_time_increment, - NULL); - parse->frame_duration = gst_util_uint64_scale_int (GST_SECOND, - fixed_time_increment, time_increment_resolution); - } else { - /* unknown duration */ - parse->frame_duration = 0; - } - - if (aspect_ratio_width > 0 && aspect_ratio_height > 0) { - gst_caps_set_simple (out_caps, "pixel-aspect-ratio", - GST_TYPE_FRACTION, aspect_ratio_width, aspect_ratio_height, NULL); - } - - if (width > 0 && height > 0) { - gst_caps_set_simple (out_caps, - "width", G_TYPE_INT, width, "height", G_TYPE_INT, height, NULL); - } - - GST_DEBUG_OBJECT (parse, "setting downstream caps to %" GST_PTR_FORMAT, - out_caps); - res = gst_pad_set_caps (parse->srcpad, out_caps); - gst_caps_unref (out_caps); +static void +gst_mpeg4vparse_base_init (gpointer klass) +{ + GstElementClass *element_class = GST_ELEMENT_CLASS (klass); - parse->have_src_caps = TRUE; - if (parse->pending_segment != NULL) { - /* We can send pending events since we now have caps for the srcpad */ - gst_pad_push_event (parse->srcpad, parse->pending_segment); - parse->pending_segment = NULL; + gst_element_class_add_pad_template (element_class, + gst_static_pad_template_get (&src_template)); + 
gst_element_class_add_pad_template (element_class, + gst_static_pad_template_get (&sink_template)); - if (G_UNLIKELY (parse->pending_events != NULL)) { - GList *l; + gst_element_class_set_details_simple (element_class, + "MPEG 4 video elementary stream parser", "Codec/Parser/Video", + "Parses MPEG-4 Part 2 elementary video streams", + "Julien Moutte <julien@fluendo.com>"); +} - for (l = parse->pending_events; l != NULL; l = l->next) - gst_pad_push_event (parse->srcpad, GST_EVENT (l->data)); +static void +gst_mpeg4vparse_set_property (GObject * object, guint property_id, + const GValue * value, GParamSpec * pspec) +{ + GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (object); - g_list_free (parse->pending_events); - parse->pending_events = NULL; - } + switch (property_id) { + case PROP_DROP: + parse->drop = g_value_get_boolean (value); + break; + case PROP_CONFIG_INTERVAL: + parse->interval = g_value_get_uint (value); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); } - return res; } -#define VIDEO_OBJECT_STARTCODE_MIN 0x00 -#define VIDEO_OBJECT_STARTCODE_MAX 0x1F -#define VOS_STARTCODE 0xB0 -#define VOS_ENDCODE 0xB1 -#define USER_DATA_STARTCODE 0xB2 -#define GOP_STARTCODE 0xB3 -#define VISUAL_OBJECT_STARTCODE 0xB5 -#define VOP_STARTCODE 0xB6 - -#define START_MARKER 0x000001 -#define VISUAL_OBJECT_STARTCODE_MARKER ((START_MARKER << 8) + VISUAL_OBJECT_STARTCODE) -#define USER_DATA_STARTCODE_MARKER ((START_MARKER << 8) + USER_DATA_STARTCODE) - -typedef struct +static void +gst_mpeg4vparse_get_property (GObject * object, guint property_id, + GValue * value, GParamSpec * pspec) { - const guint8 *data; - /* byte offset */ - gsize offset; - /* bit offset */ - gsize b_offset; + GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (object); - /* size in bytes */ - gsize size; -} bitstream_t; + switch (property_id) { + case PROP_DROP: + g_value_set_boolean (value, parse->drop); + break; + case PROP_CONFIG_INTERVAL: + g_value_set_uint (value, 
parse->interval); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); + } +} -static gboolean -get_bits (bitstream_t * b, int num, guint32 * bits) +static void +gst_mpeg4vparse_class_init (GstMpeg4VParseClass * klass) { - *bits = 0; - - if (b->offset + ((b->b_offset + num) / 8) > b->size) - return FALSE; + GObjectClass *gobject_class = G_OBJECT_CLASS (klass); + GstBaseParseClass *parse_class = GST_BASE_PARSE_CLASS (klass); - if (b->b_offset + num <= 8) { - *bits = b->data[b->offset]; - *bits = (*bits >> (8 - num - b->b_offset)) & (((1 << num)) - 1); + parent_class = g_type_class_peek_parent (klass); - b->offset += (b->b_offset + num) / 8; - b->b_offset = (b->b_offset + num) % 8; - return TRUE; - } else { - /* going over the edge.. */ - int next; + gobject_class->set_property = gst_mpeg4vparse_set_property; + gobject_class->get_property = gst_mpeg4vparse_get_property; - next = (8 - b->b_offset); - do { - guint32 t; + g_object_class_install_property (gobject_class, PROP_DROP, + g_param_spec_boolean ("drop", "drop", + "Drop data untill valid configuration data is received either " + "in the stream or through caps", DEFAULT_PROP_DROP, + G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - if (!get_bits (b, next, &t)) - return FALSE; - *bits <<= next; - *bits |= t; - num -= next; - next = MIN (8, num); - } while (num > 0); + g_object_class_install_property (gobject_class, PROP_CONFIG_INTERVAL, + g_param_spec_uint ("config-interval", + "Configuration Send Interval", + "Send Configuration Insertion Interval in seconds (configuration headers " + "will be multiplexed in the data stream when detected.) 
(0 = disabled)", + 0, 3600, DEFAULT_CONFIG_INTERVAL, + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - return TRUE; - } + /* Override BaseParse vfuncs */ + parse_class->start = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_start); + parse_class->stop = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_stop); + parse_class->check_valid_frame = + GST_DEBUG_FUNCPTR (gst_mpeg4vparse_check_valid_frame); + parse_class->parse_frame = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_parse_frame); + parse_class->pre_push_frame = + GST_DEBUG_FUNCPTR (gst_mpeg4vparse_pre_push_frame); + parse_class->set_sink_caps = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_set_caps); } -#define GET_BITS(b, num, bits) G_STMT_START { \ - if (!get_bits(b, num, bits)) \ - goto failed; \ -} G_STMT_END - -#define MARKER_BIT(b) G_STMT_START { \ - guint32 i; \ - GET_BITS(b, 1, &i); \ - if (i != 0x1) \ - goto failed; \ -} G_STMT_END - -static inline gboolean -next_start_code (bitstream_t * b) +static void +gst_mpeg4vparse_init (GstMpeg4VParse * parse, GstMpeg4VParseClass * g_class) { - guint32 bits; - - GET_BITS (b, 1, &bits); - if (bits != 0) - goto failed; - - while (b->b_offset != 0) { - GET_BITS (b, 1, &bits); - if (bits != 0x1) - goto failed; - } - - return TRUE; - -failed: - return FALSE; + parse->interval = DEFAULT_CONFIG_INTERVAL; + parse->last_report = GST_CLOCK_TIME_NONE; } -static gint aspect_ratio_table[6][2] = { {-1, -1}, {1, 1}, {12, 11}, -{10, 11}, {16, 11}, {40, 33} -}; - static void -gst_mpeg4vparse_set_config (GstMpeg4VParse * parse, const guint8 * data, - gsize size) +gst_mpeg4vparse_reset_frame (GstMpeg4VParse * mp4vparse) { - /* limit possible caps noise */ - if (parse->config && size == GST_BUFFER_SIZE (parse->config) && - memcmp (GST_BUFFER_DATA (parse->config), data, size) == 0) - return; + /* done parsing; reset state */ + mp4vparse->last_sc = -1; + mp4vparse->vop_offset = -1; + mp4vparse->vos_offset = -1; + mp4vparse->vo_offset = -1; +} - if (parse->config != NULL) - gst_buffer_unref (parse->config); +static void 
+gst_mpeg4vparse_reset (GstMpeg4VParse * mp4vparse) +{ + gst_mpeg4vparse_reset_frame (mp4vparse); + mp4vparse->profile = 0; + mp4vparse->update_caps = TRUE; - parse->config = gst_buffer_new_and_alloc (size); - memcpy (GST_BUFFER_DATA (parse->config), data, size); + gst_buffer_replace (&mp4vparse->config, NULL); + memset (&mp4vparse->params, 0, sizeof (mp4vparse->params)); } -/* Handle parsing a video object */ static gboolean -gst_mpeg4vparse_handle_vo (GstMpeg4VParse * parse, const guint8 * data, - gsize size, gboolean set_codec_data) +gst_mpeg4vparse_start (GstBaseParse * parse) { - guint32 bits; - bitstream_t bs = { data, 0, 0, size }; - guint16 time_increment_resolution = 0; - guint16 fixed_time_increment = 0; - gint aspect_ratio_width = -1, aspect_ratio_height = -1; - gint height = -1, width = -1; - - if (set_codec_data) - gst_mpeg4vparse_set_config (parse, data, size); - - /* expecting a video object startcode */ - GET_BITS (&bs, 32, &bits); - if (bits > 0x11F) - goto failed; - - /* expecting a video object layer startcode */ - GET_BITS (&bs, 32, &bits); - if (bits < 0x120 || bits > 0x12F) - goto failed; - - /* ignore random accessible vol and video object type indication */ - GET_BITS (&bs, 9, &bits); - - GET_BITS (&bs, 1, &bits); - if (bits) { - /* skip video object layer verid and priority */ - GET_BITS (&bs, 7, &bits); - } + GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEOPARSE (parse); - /* aspect ratio info */ - GET_BITS (&bs, 4, &bits); - if (bits == 0) - goto failed; - - /* check if aspect ratio info is extended par */ - if (bits == 0xf) { - GET_BITS (&bs, 8, &bits); - aspect_ratio_width = bits; - GET_BITS (&bs, 8, &bits); - aspect_ratio_height = bits; - } else if (bits < 0x6) { - aspect_ratio_width = aspect_ratio_table[bits][0]; - aspect_ratio_height = aspect_ratio_table[bits][1]; - } + GST_DEBUG_OBJECT (parse, "start"); - GET_BITS (&bs, 1, &bits); - if (bits) { - /* vol control parameters, skip chroma and low delay */ - GET_BITS (&bs, 3, &bits); - 
GET_BITS (&bs, 1, &bits); - if (bits) { - /* skip vbv_parameters */ - GET_BITS (&bs, 79, &bits); - } - } - - /* layer shape */ - GET_BITS (&bs, 2, &bits); - /* only support rectangular */ - if (bits != 0) - goto failed; - - MARKER_BIT (&bs); - GET_BITS (&bs, 16, &bits); - time_increment_resolution = bits; - MARKER_BIT (&bs); - - GST_DEBUG_OBJECT (parse, "time increment resolution %d", - time_increment_resolution); - - GET_BITS (&bs, 1, &bits); - if (bits) { - /* fixed time increment */ - int n; + gst_mpeg4vparse_reset (mp4vparse); + /* at least this much for a valid frame */ + gst_base_parse_set_min_frame_size (parse, 6); - /* Length of the time increment is the minimal number of bits needed to - * represent time_increment_resolution */ - for (n = 0; (time_increment_resolution >> n) != 0; n++); - GET_BITS (&bs, n, &bits); - - fixed_time_increment = bits; - } else { - /* When fixed_vop_rate is not set we can't guess any framerate */ - fixed_time_increment = 0; - } - GST_DEBUG_OBJECT (parse, "fixed time increment %d", fixed_time_increment); - - /* assuming rectangular shape */ - MARKER_BIT (&bs); - GET_BITS (&bs, 13, &bits); - width = bits; - MARKER_BIT (&bs); - GET_BITS (&bs, 13, &bits); - height = bits; - MARKER_BIT (&bs); - - /* ok we know there is enough data in the stream to decode it and we can start - * pushing the data */ - parse->have_config = TRUE; - -out: - return gst_mpeg4vparse_set_new_caps (parse, time_increment_resolution, - fixed_time_increment, aspect_ratio_width, aspect_ratio_height, - width, height); - - /* ERRORS */ -failed: - { - GST_WARNING_OBJECT (parse, "Failed to parse config data"); - goto out; - } + return TRUE; } -static inline gboolean -skip_user_data (bitstream_t * bs, guint32 * bits) +static gboolean +gst_mpeg4vparse_stop (GstBaseParse * parse) { - while (*bits == USER_DATA_STARTCODE_MARKER) { - guint32 b; + GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEOPARSE (parse); - do { - GET_BITS (bs, 8, &b); - *bits = (*bits << 8) | b; - } while 
((*bits >> 8) != START_MARKER); - } + GST_DEBUG_OBJECT (parse, "stop"); - return TRUE; + gst_mpeg4vparse_reset (mp4vparse); -failed: - return FALSE; + return TRUE; } -/* Handle parsing a visual object sequence. - Returns whether we successfully set the caps downstream if needed */ static gboolean -gst_mpeg4vparse_handle_vos (GstMpeg4VParse * parse, const guint8 * data, +gst_mpeg4vparse_process_config (GstMpeg4VParse * mp4vparse, const guint8 * data, gsize size) { - /* Skip the startcode */ - guint32 bits; - - guint8 profile; - gboolean equal; - bitstream_t bs = { data, 0, 0, size }; - - if (size < 5) - goto failed; - - /* Parse the config from the VOS frame */ - bs.offset = 5; - - profile = data[4]; - - /* invalid profile, yikes */ - if (profile == 0) { - GST_WARNING_OBJECT (parse, "Invalid profile in VOS"); - return FALSE; - } - - equal = FALSE; - if (G_LIKELY (parse->config && size == GST_BUFFER_SIZE (parse->config) && - memcmp (GST_BUFFER_DATA (parse->config), data, size) == 0)) - equal = TRUE; - - if (G_LIKELY (parse->profile == profile && equal)) { - /* We know this profile and config data, so we can just keep the same caps - */ + /* only do stuff if something new */ + if (mp4vparse->config && size == GST_BUFFER_SIZE (mp4vparse->config) && + memcmp (GST_BUFFER_DATA (mp4vparse->config), data, size) == 0) return TRUE; - } - - /* Even if we fail to parse, then some other element might succeed, so always - * put the VOS in the config */ - parse->profile = profile; - gst_mpeg4vparse_set_config (parse, data, size); - - parse->have_config = TRUE; - - /* Expect Visual Object startcode */ - GET_BITS (&bs, 32, &bits); - - /* but skip optional user data */ - if (!skip_user_data (&bs, &bits)) - goto failed; - - if (bits != VISUAL_OBJECT_STARTCODE_MARKER) - goto failed; - - GET_BITS (&bs, 1, &bits); - if (bits == 0x1) { - /* Skip visual_object_verid and priority */ - GET_BITS (&bs, 7, &bits); - } - - GET_BITS (&bs, 4, &bits); - /* Only support video ID */ - if (bits != 
0x1) - goto failed; - /* video signal type */ - GET_BITS (&bs, 1, &bits); - - if (bits == 0x1) { - /* video signal type, ignore format and range */ - GET_BITS (&bs, 4, &bits); - - GET_BITS (&bs, 1, &bits); - if (bits == 0x1) { - /* ignore color description */ - GET_BITS (&bs, 24, &bits); - } - } - - if (!next_start_code (&bs)) - goto failed; - - /* skip optional user data */ - GET_BITS (&bs, 32, &bits); - if (!skip_user_data (&bs, &bits)) - goto failed; - /* rewind to start code */ - bs.offset -= 4; - - data = &bs.data[bs.offset]; - size -= bs.offset; - - return gst_mpeg4vparse_handle_vo (parse, data, size, FALSE); - -out: - return gst_mpeg4vparse_set_new_caps (parse, 0, 0, -1, -1, -1, -1); - - /* ERRORS */ -failed: - { - GST_WARNING_OBJECT (parse, "Failed to parse config data"); - goto out; - } -} - -static void -gst_mpeg4vparse_push (GstMpeg4VParse * parse, gsize size) -{ - if (G_UNLIKELY (!parse->have_config && parse->drop)) { - GST_LOG_OBJECT (parse, "Dropping %d bytes", parse->offset); - gst_adapter_flush (parse->adapter, size); - } else { - GstBuffer *out_buf; - - out_buf = gst_adapter_take_buffer (parse->adapter, parse->offset); - - if (G_LIKELY (out_buf)) { - out_buf = gst_buffer_make_metadata_writable (out_buf); - GST_BUFFER_TIMESTAMP (out_buf) = parse->timestamp; - - /* Set GST_BUFFER_FLAG_DELTA_UNIT if it's not an intra frame */ - if (!parse->intra_frame) { - GST_BUFFER_FLAG_SET (out_buf, GST_BUFFER_FLAG_DELTA_UNIT); - } else if (parse->interval > 0 && parse->config) { - GstClockTime timestamp = GST_BUFFER_TIMESTAMP (out_buf); - guint64 diff; - - /* init */ - if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (parse->last_report))) { - parse->last_report = timestamp; - } - - /* insert on intra frames */ - if (G_LIKELY (timestamp > parse->last_report)) - diff = timestamp - parse->last_report; - else - diff = 0; - - GST_LOG_OBJECT (parse, - "now %" GST_TIME_FORMAT ", last VOP-I %" GST_TIME_FORMAT, - GST_TIME_ARGS (timestamp), GST_TIME_ARGS (parse->last_report)); - - 
GST_DEBUG_OBJECT (parse, - "interval since last config %" GST_TIME_FORMAT, - GST_TIME_ARGS (diff)); - - if (G_UNLIKELY (GST_TIME_AS_SECONDS (diff) >= parse->interval)) { - /* we need to send config now first */ - GstBuffer *superbuf; - - GST_LOG_OBJECT (parse, "inserting config in stream"); - - /* insert header */ - superbuf = gst_buffer_merge (parse->config, out_buf); - gst_buffer_unref (out_buf); - - out_buf = gst_buffer_make_metadata_writable (superbuf); - GST_BUFFER_TIMESTAMP (out_buf) = timestamp; - - if (G_UNLIKELY (timestamp != -1)) { - parse->last_report = timestamp; - } - } - } - gst_buffer_set_caps (out_buf, GST_PAD_CAPS (parse->srcpad)); - gst_pad_push (parse->srcpad, out_buf); - } - } - - /* Restart now that we flushed data */ - parse->offset = 0; - parse->state = PARSE_NEED_START; - parse->intra_frame = FALSE; -} - -static GstFlowReturn -gst_mpeg4vparse_drain (GstMpeg4VParse * parse, GstBuffer * last_buffer) -{ - GstFlowReturn ret = GST_FLOW_OK; - const guint8 *data = NULL; - guint available = 0; - - available = gst_adapter_available (parse->adapter); - /* We do a quick check here to avoid the _peek() below. 
*/ - if (G_UNLIKELY (available < 5)) { - GST_DEBUG_OBJECT (parse, "we need more data, %d < 5", available); - goto beach; - } - data = gst_adapter_peek (parse->adapter, available); - - /* Need at least 5 more bytes, 4 for the startcode, 1 to optionally determine - * the VOP frame type */ - while (available >= 5 && parse->offset < available - 5) { - if (data[parse->offset] == 0 && data[parse->offset + 1] == 0 && - data[parse->offset + 2] == 1) { - - switch (parse->state) { - case PARSE_NEED_START: - { - gboolean found = FALSE; - guint8 code; - - code = data[parse->offset + 3]; - - switch (code) { - case VOP_STARTCODE: - case VOS_STARTCODE: - case GOP_STARTCODE: - found = TRUE; - break; - default: - if (code <= 0x1f) - found = TRUE; - break; - } - if (found) { - /* valid starts of a frame */ - parse->state = PARSE_START_FOUND; - if (parse->offset > 0) { - GST_LOG_OBJECT (parse, "Flushing %u bytes", parse->offset); - gst_adapter_flush (parse->adapter, parse->offset); - parse->offset = 0; - available = gst_adapter_available (parse->adapter); - data = gst_adapter_peek (parse->adapter, available); - } - } else - parse->offset += 4; - break; - } - case PARSE_START_FOUND: - { - guint8 code; - - code = data[parse->offset + 3]; - - switch (code) { - case VOP_STARTCODE: - GST_LOG_OBJECT (parse, "found VOP start marker at %u", - parse->offset); - parse->intra_frame = ((data[parse->offset + 4] >> 6 & 0x3) == 0); - /* Ensure that the timestamp of the outgoing buffer is the same - * as the one the VOP header is found in */ - parse->timestamp = GST_BUFFER_TIMESTAMP (last_buffer); - parse->state = PARSE_VOP_FOUND; - break; - case VOS_STARTCODE: - GST_LOG_OBJECT (parse, "found VOS start marker at %u", - parse->offset); - parse->vos_offset = parse->offset; - parse->state = PARSE_VOS_FOUND; - break; - default: - if (code <= 0x1f) { - GST_LOG_OBJECT (parse, "found VO start marker at %u", - parse->offset); - parse->vos_offset = parse->offset; - parse->state = PARSE_VO_FOUND; - } - break; 
- } - /* Jump over it */ - parse->offset += 4; - break; - } - case PARSE_VO_FOUND: - switch (data[parse->offset + 3]) { - case GOP_STARTCODE: - case VOP_STARTCODE: - /* end of VOS found, interpret the config data and restart the - * search for the VOP */ - gst_mpeg4vparse_handle_vo (parse, data + parse->vos_offset, - parse->offset - parse->vos_offset, TRUE); - parse->state = PARSE_START_FOUND; - break; - default: - parse->offset += 4; - } - break; - case PARSE_VOS_FOUND: - switch (data[parse->offset + 3]) { - case GOP_STARTCODE: - case VOP_STARTCODE: - /* end of VOS found, interpret the config data and restart the - * search for the VOP */ - gst_mpeg4vparse_handle_vos (parse, data + parse->vos_offset, - parse->offset - parse->vos_offset); - parse->state = PARSE_START_FOUND; - break; - default: - parse->offset += 4; - } - break; - case PARSE_VOP_FOUND: - { /* We were in a VOP already, any start code marks the end of it */ - GST_LOG_OBJECT (parse, "found VOP end marker at %u", parse->offset); - - gst_mpeg4vparse_push (parse, parse->offset); - - available = gst_adapter_available (parse->adapter); - data = gst_adapter_peek (parse->adapter, available); - break; - } - default: - GST_WARNING_OBJECT (parse, "unexpected parse state (%d)", - parse->state); - ret = GST_FLOW_UNEXPECTED; - goto beach; - } - } else { /* Continue searching */ - parse->offset++; - } + if (!gst_mpeg4_params_parse_config (&mp4vparse->params, data, size)) { + GST_DEBUG_OBJECT (mp4vparse, "failed to parse config data (size %" + G_GSSIZE_FORMAT ")", size); + return FALSE; } -beach: - return ret; -} + GST_LOG_OBJECT (mp4vparse, "accepting parsed config size %" G_GSSIZE_FORMAT, + size); -static GstFlowReturn -gst_mpeg4vparse_chain (GstPad * pad, GstBuffer * buffer) -{ - GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (gst_pad_get_parent (pad)); - GstFlowReturn ret = GST_FLOW_OK; + /* parsing ok, so accept it as new config */ + if (mp4vparse->config != NULL) + gst_buffer_unref (mp4vparse->config); - 
GST_DEBUG_OBJECT (parse, "received buffer of %u bytes with ts %" - GST_TIME_FORMAT " and offset %" G_GINT64_FORMAT, GST_BUFFER_SIZE (buffer), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)), - GST_BUFFER_OFFSET (buffer)); + mp4vparse->config = gst_buffer_new_and_alloc (size); + memcpy (GST_BUFFER_DATA (mp4vparse->config), data, size); - gst_adapter_push (parse->adapter, buffer); + /* trigger src caps update */ + mp4vparse->update_caps = TRUE; - /* Drain the accumulated blocks frame per frame */ - ret = gst_mpeg4vparse_drain (parse, buffer); - - gst_object_unref (parse); - - return ret; + return TRUE; } +/* caller guarantees at least start code in @buf at @off */ static gboolean -gst_mpeg4vparse_sink_setcaps (GstPad * pad, GstCaps * caps) +gst_mpeg4vparse_process_sc (GstMpeg4VParse * mp4vparse, GstBuffer * buf, + gint off) { - gboolean res = TRUE; - GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (gst_pad_get_parent (pad)); - GstStructure *s; - const GValue *value; + guint8 *data; + guint code; - GST_DEBUG_OBJECT (parse, "setcaps called with %" GST_PTR_FORMAT, caps); - parse->sink_caps = gst_caps_ref (caps); + g_return_val_if_fail (buf && GST_BUFFER_SIZE (buf) >= off + 4, FALSE); - s = gst_caps_get_structure (caps, 0); + data = GST_BUFFER_DATA (buf); + code = data[off + 3]; - if ((value = gst_structure_get_value (s, "codec_data")) != NULL - && G_VALUE_HOLDS (value, GST_TYPE_BUFFER)) { - GstBuffer *buf = gst_value_get_buffer (value); - - /* Set the config from this codec_data immediately so that in the worst - case, we don't just discard it. - Note that in most cases, this will be freed and overwritten when we - manage to parse the codec_data. 
*/ - if (!parse->config) { - parse->config = gst_buffer_copy (buf); - } + GST_LOG_OBJECT (mp4vparse, "process startcode %x", code); - if (GST_BUFFER_SIZE (buf) < 4) { - GST_WARNING_OBJECT (parse, "codec_data too short, ignoring"); - goto failed_parse; + /* if we found a VOP, next start code ends it, + * except for final VOS end sequence code included in last VOP-frame */ + if (mp4vparse->vop_offset >= 0 && code != MPEG4_VOS_ENDCODE) { + if (G_LIKELY (GST_BUFFER_SIZE (buf) > mp4vparse->vop_offset + 4)) { + mp4vparse->intra_frame = + ((data[mp4vparse->vop_offset + 4] >> 6 & 0x3) == 0); } else { - const guint8 *data = GST_BUFFER_DATA (buf); - - res = FALSE; - if (data[0] == 0 && data[1] == 0 && data[2] == 1) { - if (data[3] == VOS_STARTCODE) { - /* Usually the codec data will be a visual object sequence, containing - a visual object, with a video object/video object layer. */ - res = gst_mpeg4vparse_handle_vos (parse, data, GST_BUFFER_SIZE (buf)); - } else if (data[3] <= VIDEO_OBJECT_STARTCODE_MAX) { - /* VIDEO_OBJECT_STARTCODE_MIN is zero, and data is unsigned, so we - don't need to check min (and in fact that causes a compile err */ - /* Sometimes, instead, it'll just have the video object/video object - layer data. We can parse that too, though it'll give us slightly - less information. */ - res = gst_mpeg4vparse_handle_vo (parse, data, GST_BUFFER_SIZE (buf), - FALSE); - } - if (!res) - goto failed_parse; - } else { - GST_WARNING_OBJECT (parse, - "codec_data does not begin with start code, invalid"); - goto failed_parse; - } + GST_WARNING_OBJECT (mp4vparse, "no data following VOP startcode"); + mp4vparse->intra_frame = FALSE; } - } else { - /* No codec data; treat the same a failed codec data */ - goto failed_parse; + GST_LOG_OBJECT (mp4vparse, "ending frame of size %d, is intra %d", off, + mp4vparse->intra_frame); + return TRUE; } -done: - gst_object_unref (parse); - return res; - -failed_parse: - /* No codec data, or obviously-invalid, so set minimal new caps. 
- VOS parsing later will (hopefully) fill in the other fields */ - res = gst_mpeg4vparse_set_new_caps (parse, 0, 0, 0, 0, 0, 0); - goto done; -} + switch (code) { + case MPEG4_VOP_STARTCODE: + case MPEG4_GOP_STARTCODE: + { + gint offset; -static gboolean -gst_mpeg4vparse_sink_event (GstPad * pad, GstEvent * event) -{ - gboolean res = TRUE; - GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (gst_pad_get_parent (pad)); - - GST_DEBUG_OBJECT (parse, "handling event type %s", - GST_EVENT_TYPE_NAME (event)); - - switch (GST_EVENT_TYPE (event)) { - case GST_EVENT_FLUSH_STOP: - parse->last_report = GST_CLOCK_TIME_NONE; - gst_adapter_clear (parse->adapter); - parse->state = PARSE_NEED_START; - parse->offset = 0; - break; - case GST_EVENT_EOS: - if (parse->pending_segment != NULL) { - /* Send pending newsegment before EOS */ - gst_pad_push_event (parse->srcpad, parse->pending_segment); - parse->pending_segment = NULL; + if (code == MPEG4_VOP_STARTCODE) { + GST_LOG_OBJECT (mp4vparse, "startcode is VOP"); + mp4vparse->vop_offset = off; + } else { + GST_LOG_OBJECT (mp4vparse, "startcode is GOP"); } - if (parse->state == PARSE_VOP_FOUND) { - /* If we've found the start of the VOP assume what's left in the - * adapter is the complete VOP. This might cause us to send an - * incomplete VOP out, but prevents the last video frame from - * potentially being dropped */ - gst_mpeg4vparse_push (parse, gst_adapter_available (parse->adapter)); + /* parse config data ending here if proper startcodes found earlier; + * preferably start at VOS (visual object sequence), + * otherwise at VO (video object) */ + offset = mp4vparse->vos_offset >= 0 ? 
+ mp4vparse->vos_offset : mp4vparse->vo_offset; + if (offset >= 0) { + gst_mpeg4vparse_process_config (mp4vparse, GST_BUFFER_DATA (buf), off); + /* avoid accepting again for a VOP sc following a GOP sc */ + mp4vparse->vos_offset = -1; + mp4vparse->vo_offset = -1; } - /* fallthrough */ - case GST_EVENT_FLUSH_START: - res = gst_pad_event_default (pad, event); break; - case GST_EVENT_NEWSEGMENT: - gst_event_replace (&parse->pending_segment, event); - gst_event_unref (event); - res = TRUE; + } + case MPEG4_VOS_STARTCODE: + GST_LOG_OBJECT (mp4vparse, "startcode is VOS"); + mp4vparse->vos_offset = off; break; default: - if (G_UNLIKELY (!parse->have_src_caps || parse->pending_segment)) { - /* We don't yet have enough data to set caps on the srcpad, so collect - * non-critical events till we do */ - parse->pending_events = g_list_append (parse->pending_events, event); - res = TRUE; - } else - res = gst_pad_event_default (pad, event); + /* VO (video object) cases */ + if (code <= 0x1f) { + GST_LOG_OBJECT (mp4vparse, "startcode is VO"); + mp4vparse->vo_offset = off; + } break; } - gst_object_unref (parse); - - return res; + /* at least need to have a VOP in a frame */ + return FALSE; } +/* FIXME move into baseparse, or anything equivalent; + * see https://bugzilla.gnome.org/show_bug.cgi?id=650093 */ +#define GST_BASE_PARSE_FRAME_FLAG_PARSING 0x10000 + static gboolean -gst_mpeg4vparse_src_query (GstPad * pad, GstQuery * query) +gst_mpeg4vparse_check_valid_frame (GstBaseParse * parse, + GstBaseParseFrame * frame, guint * framesize, gint * skipsize) { - GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (gst_pad_get_parent (pad)); - gboolean res; - - switch (GST_QUERY_TYPE (query)) { - case GST_QUERY_LATENCY: - { - /* We need to send the query upstream and add the returned latency to our - * own */ - GstClockTime min_latency, max_latency; - - gboolean us_live; - - GstClockTime our_latency; + GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEOPARSE (parse); + GstBuffer *buf = frame->buffer; + 
GstByteReader reader = GST_BYTE_READER_INIT_FROM_BUFFER (buf); + gint off = 0; + gboolean ret; + guint code; + +retry: + /* at least start code and subsequent byte */ + if (G_UNLIKELY (GST_BUFFER_SIZE (buf) - off < 5)) + return FALSE; - if ((res = gst_pad_peer_query (parse->sinkpad, query))) { - gst_query_parse_latency (query, &us_live, &min_latency, &max_latency); + /* avoid stale cached parsing state */ + if (!(frame->flags & GST_BASE_PARSE_FRAME_FLAG_PARSING)) { + GST_LOG_OBJECT (mp4vparse, "parsing new frame"); + gst_mpeg4vparse_reset_frame (mp4vparse); + frame->flags |= GST_BASE_PARSE_FRAME_FLAG_PARSING; + } else { + GST_LOG_OBJECT (mp4vparse, "resuming frame parsing"); + } - GST_DEBUG_OBJECT (parse, "Peer latency: min %" - GST_TIME_FORMAT " max %" GST_TIME_FORMAT, - GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency)); + /* if already found a previous start code, e.g. start of frame, go for next */ + if (mp4vparse->last_sc >= 0) { + off = mp4vparse->last_sc; + goto next; + } - /* our latency is 1 frame, find the frame duration */ - our_latency = parse->frame_duration; + off = gst_byte_reader_masked_scan_uint32 (&reader, 0xffffff00, 0x00000100, + off, GST_BUFFER_SIZE (buf) - off); - GST_DEBUG_OBJECT (parse, "Our latency: %" GST_TIME_FORMAT, - GST_TIME_ARGS (our_latency)); + GST_LOG_OBJECT (mp4vparse, "possible sync at buffer offset %d", off); - /* we add some latency */ - min_latency += our_latency; - if (max_latency != -1) - max_latency += our_latency; + /* didn't find anything that looks like a sync word, skip */ + if (G_UNLIKELY (off < 0)) { + *skipsize = GST_BUFFER_SIZE (buf) - 3; + return FALSE; + } - GST_DEBUG_OBJECT (parse, "Calculated total latency : min %" - GST_TIME_FORMAT " max %" GST_TIME_FORMAT, - GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency)); + /* possible frame header, but not at offset 0? 
skip bytes before sync */ + if (G_UNLIKELY (off > 0)) { + *skipsize = off; + return FALSE; + } - gst_query_set_latency (query, us_live, min_latency, max_latency); - } + /* ensure start code looks like a real starting start code */ + code = GST_BUFFER_DATA (buf)[3]; + switch (code) { + case MPEG4_VOP_STARTCODE: + case MPEG4_VOS_STARTCODE: + case MPEG4_GOP_STARTCODE: break; - } default: - res = gst_pad_peer_query (parse->sinkpad, query); - break; + if (code <= 0x1f) + break; + /* undesirable sc */ + GST_LOG_OBJECT (mp4vparse, "start code is no VOS, VO, VOP or GOP"); + off++; + goto retry; + } + + /* found sc */ + mp4vparse->last_sc = 0; + + /* examine start code, which should not end frame at present */ + gst_mpeg4vparse_process_sc (mp4vparse, buf, 0); + +next: + /* start is fine as of now */ + *skipsize = 0; + /* position a bit further than last sc */ + off++; + /* so now we have start code at start of data; locate next start code */ + off = gst_byte_reader_masked_scan_uint32 (&reader, 0xffffff00, 0x00000100, + off, GST_BUFFER_SIZE (buf) - off); + + GST_LOG_OBJECT (mp4vparse, "next start code at %d", off); + if (off < 0) { + /* if draining, take all */ + if (GST_BASE_PARSE_DRAINING (parse)) { + off = GST_BUFFER_SIZE (buf); + ret = TRUE; + } else { + /* resume scan where we left it */ + mp4vparse->last_sc = GST_BUFFER_SIZE (buf) - 4; + /* request best next available */ + *framesize = G_MAXUINT; + return FALSE; + } + } else { + /* decide whether this startcode ends a frame */ + ret = gst_mpeg4vparse_process_sc (mp4vparse, buf, off); } - gst_object_unref (parse); - return res; + if (ret) { + *framesize = off; + } else { + goto next; + } + + return ret; } static void -gst_mpeg4vparse_cleanup (GstMpeg4VParse * parse) +gst_mpeg4vparse_update_src_caps (GstMpeg4VParse * mp4vparse) { - if (parse->sink_caps) { - gst_caps_unref (parse->sink_caps); - parse->sink_caps = NULL; - } - if (parse->adapter) { - gst_adapter_clear (parse->adapter); - } - if (parse->config != NULL) { - 
gst_buffer_unref (parse->config); - parse->config = NULL; - } - - if (parse->pending_segment) - gst_event_unref (parse->pending_segment); - parse->pending_segment = NULL; + GstCaps *caps = NULL; - g_list_foreach (parse->pending_events, (GFunc) gst_event_unref, NULL); - g_list_free (parse->pending_events); - parse->pending_events = NULL; + /* only update if no src caps yet or explicitly triggered */ + if (G_LIKELY (GST_PAD_CAPS (GST_BASE_PARSE_SRC_PAD (mp4vparse)) && + !mp4vparse->update_caps)) + return; - parse->have_src_caps = FALSE; + /* carry over input caps as much as possible; override with our own stuff */ + caps = GST_PAD_CAPS (GST_BASE_PARSE_SINK_PAD (mp4vparse)); + if (caps) { + caps = gst_caps_copy (caps); + } else { + caps = gst_caps_new_simple ("video/mpeg", + "mpegversion", G_TYPE_INT, 4, NULL); + } - parse->state = PARSE_NEED_START; - parse->have_config = FALSE; - parse->offset = 0; - parse->last_report = GST_CLOCK_TIME_NONE; -} + gst_caps_set_simple (caps, "systemstream", G_TYPE_BOOLEAN, FALSE, + "parsed", G_TYPE_BOOLEAN, TRUE, NULL); -static GstStateChangeReturn -gst_mpeg4vparse_change_state (GstElement * element, GstStateChange transition) -{ - GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (element); + if (mp4vparse->profile != 0) { + gchar *profile = NULL; - GstStateChangeReturn ret; + /* FIXME does it make sense to expose the profile in the caps ? 
*/ + profile = g_strdup_printf ("%d", mp4vparse->profile); + gst_caps_set_simple (caps, "profile-level-id", + G_TYPE_STRING, profile, NULL); + g_free (profile); + } - ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); + if (mp4vparse->config != NULL) { + gst_caps_set_simple (caps, "codec_data", + GST_TYPE_BUFFER, mp4vparse->config, NULL); + } - switch (transition) { - case GST_STATE_CHANGE_PAUSED_TO_READY: - gst_mpeg4vparse_cleanup (parse); - break; - default: - break; + if (mp4vparse->params.width > 0 && mp4vparse->params.height > 0) { + gst_caps_set_simple (caps, "width", G_TYPE_INT, mp4vparse->params.width, + "height", G_TYPE_INT, mp4vparse->params.height, NULL); } - return ret; -} -static void -gst_mpeg4vparse_finalize (GObject * object) -{ - GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (object); + /* perhaps we have a framerate */ + if (mp4vparse->params.fixed_time_increment != 0) { + gint fps_num = mp4vparse->params.time_increment_resolution; + gint fps_den = mp4vparse->params.fixed_time_increment; + GstClockTime latency = gst_util_uint64_scale (GST_SECOND, fps_den, fps_num); - gst_mpeg4vparse_cleanup (parse); + gst_caps_set_simple (caps, "framerate", + GST_TYPE_FRACTION, fps_num, fps_den, NULL); + gst_base_parse_set_frame_rate (GST_BASE_PARSE (mp4vparse), + fps_num, fps_den, 0, 0); + gst_base_parse_set_latency (GST_BASE_PARSE (mp4vparse), latency, latency); + } - if (parse->adapter) { - g_object_unref (parse->adapter); - parse->adapter = NULL; + /* or pixel-aspect-ratio */ + if (mp4vparse->params.aspect_ratio_width > 0 && + mp4vparse->params.aspect_ratio_height > 0) { + gst_caps_set_simple (caps, "pixel-aspect-ratio", + GST_TYPE_FRACTION, mp4vparse->params.aspect_ratio_width, + mp4vparse->params.aspect_ratio_height, NULL); } - GST_CALL_PARENT (G_OBJECT_CLASS, finalize, (object)); + gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (mp4vparse), caps); + gst_caps_unref (caps); } -static void -gst_mpeg4vparse_base_init (gpointer klass) +static 
GstFlowReturn +gst_mpeg4vparse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame) { - GstElementClass *element_class = GST_ELEMENT_CLASS (klass); + GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEOPARSE (parse); + GstBuffer *buffer = frame->buffer; - gst_element_class_add_pad_template (element_class, - gst_static_pad_template_get (&src_template)); - gst_element_class_add_pad_template (element_class, - gst_static_pad_template_get (&sink_template)); + gst_mpeg4vparse_update_src_caps (mp4vparse); - gst_element_class_set_details_simple (element_class, - "MPEG 4 video elementary stream parser", "Codec/Parser/Video", - "Parses MPEG-4 Part 2 elementary video streams", - "Julien Moutte <julien@fluendo.com>"); + if (mp4vparse->intra_frame) + GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); + else + GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); + + if (G_UNLIKELY (mp4vparse->drop && !mp4vparse->config)) { + GST_DEBUG_OBJECT (mp4vparse, "dropping frame as no config yet"); + return GST_BASE_PARSE_FLOW_DROPPED; + } else + return GST_FLOW_OK; } -static void -gst_mpeg4vparse_set_property (GObject * object, guint property_id, - const GValue * value, GParamSpec * pspec) +static GstFlowReturn +gst_mpeg4vparse_pre_push_frame (GstBaseParse * parse, GstBaseParseFrame * frame) { - GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (object); + GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEOPARSE (parse); + GstBuffer *buffer = frame->buffer; - switch (property_id) { - case PROP_DROP: - parse->drop = g_value_get_boolean (value); - break; - case PROP_CONFIG_INTERVAL: - parse->interval = g_value_get_uint (value); - break; - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); - } -} + /* periodic SPS/PPS sending */ + if (mp4vparse->interval > 0) { + GstClockTime timestamp = GST_BUFFER_TIMESTAMP (buffer); + guint64 diff; -static void -gst_mpeg4vparse_get_property (GObject * object, guint property_id, - GValue * value, GParamSpec * pspec) -{ - 
GstMpeg4VParse *parse = GST_MPEG4VIDEOPARSE (object); + /* init */ + if (!GST_CLOCK_TIME_IS_VALID (mp4vparse->last_report)) { + mp4vparse->last_report = timestamp; + } - switch (property_id) { - case PROP_DROP: - g_value_set_boolean (value, parse->drop); - break; - case PROP_CONFIG_INTERVAL: - g_value_set_uint (value, parse->interval); - break; - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); - } -} + if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) { + if (timestamp > mp4vparse->last_report) + diff = timestamp - mp4vparse->last_report; + else + diff = 0; -static void -gst_mpeg4vparse_class_init (GstMpeg4VParseClass * klass) -{ - GObjectClass *gobject_class; - GstElementClass *gstelement_class; - gstelement_class = (GstElementClass *) klass; - gobject_class = G_OBJECT_CLASS (klass); + GST_LOG_OBJECT (mp4vparse, + "now %" GST_TIME_FORMAT ", last config %" GST_TIME_FORMAT, + GST_TIME_ARGS (timestamp), GST_TIME_ARGS (mp4vparse->last_report)); - parent_class = g_type_class_peek_parent (klass); + GST_LOG_OBJECT (mp4vparse, + "interval since last config %" GST_TIME_FORMAT, GST_TIME_ARGS (diff)); - gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_finalize); + if (GST_TIME_AS_SECONDS (diff) >= mp4vparse->interval) { + /* we need to send config now first */ + GST_LOG_OBJECT (parse, "inserting config in stream"); - gobject_class->set_property = gst_mpeg4vparse_set_property; - gobject_class->get_property = gst_mpeg4vparse_get_property; + /* avoid inserting duplicate config */ + if ((GST_BUFFER_SIZE (buffer) < GST_BUFFER_SIZE (mp4vparse->config)) || + memcmp (GST_BUFFER_DATA (buffer), + GST_BUFFER_DATA (mp4vparse->config), + GST_BUFFER_SIZE (mp4vparse->config))) { + GstBuffer *superbuf; - g_object_class_install_property (gobject_class, PROP_DROP, - g_param_spec_boolean ("drop", "drop", - "Drop data untill valid configuration data is received either " - "in the stream or through caps", DEFAULT_PROP_DROP, - 
G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + /* insert header */ + superbuf = gst_buffer_merge (mp4vparse->config, buffer); + gst_buffer_copy_metadata (superbuf, buffer, GST_BUFFER_COPY_ALL); + gst_buffer_replace (&frame->buffer, superbuf); + gst_buffer_unref (superbuf); + } else { + GST_LOG_OBJECT (parse, "... but avoiding duplication"); + } - g_object_class_install_property (gobject_class, PROP_CONFIG_INTERVAL, - g_param_spec_uint ("config-interval", - "Configuration Send Interval", - "Send Configuration Insertion Interval in seconds (configuration headers " - "will be multiplexed in the data stream when detected.) (0 = disabled)", - 0, 3600, DEFAULT_CONFIG_INTERVAL, - G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + if (G_UNLIKELY (timestamp != -1)) { + mp4vparse->last_report = timestamp; + } + } + } + } - gstelement_class->change_state = - GST_DEBUG_FUNCPTR (gst_mpeg4vparse_change_state); + return GST_FLOW_OK; } -static void -gst_mpeg4vparse_init (GstMpeg4VParse * parse, GstMpeg4VParseClass * g_class) +static gboolean +gst_mpeg4vparse_set_caps (GstBaseParse * parse, GstCaps * caps) { - parse->sinkpad = gst_pad_new_from_static_template (&sink_template, "sink"); - gst_pad_set_chain_function (parse->sinkpad, - GST_DEBUG_FUNCPTR (gst_mpeg4vparse_chain)); - gst_pad_set_event_function (parse->sinkpad, - GST_DEBUG_FUNCPTR (gst_mpeg4vparse_sink_event)); - gst_pad_set_setcaps_function (parse->sinkpad, - GST_DEBUG_FUNCPTR (gst_mpeg4vparse_sink_setcaps)); - gst_element_add_pad (GST_ELEMENT (parse), parse->sinkpad); - - parse->srcpad = gst_pad_new_from_static_template (&src_template, "src"); - gst_pad_set_query_function (parse->srcpad, - GST_DEBUG_FUNCPTR (gst_mpeg4vparse_src_query)); - gst_pad_use_fixed_caps (parse->srcpad); - gst_element_add_pad (GST_ELEMENT (parse), parse->srcpad); - - parse->adapter = gst_adapter_new (); + GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEOPARSE (parse); + GstStructure *s; + const GValue *value; + GstBuffer *buf; - 
parse->interval = DEFAULT_CONFIG_INTERVAL; - parse->last_report = GST_CLOCK_TIME_NONE; + GST_DEBUG_OBJECT (parse, "setcaps called with %" GST_PTR_FORMAT, caps); - gst_mpeg4vparse_cleanup (parse); + s = gst_caps_get_structure (caps, 0); + + if ((value = gst_structure_get_value (s, "codec_data")) != NULL + && (buf = gst_value_get_buffer (value))) { + /* best possible parse attempt, + * src caps are based on sink caps so it will end up in there + * whether sucessful or not */ + gst_mpeg4vparse_process_config (mp4vparse, GST_BUFFER_DATA (buf), + GST_BUFFER_SIZE (buf)); + } + + /* let's not interfere and accept regardless of config parsing success */ + return TRUE; } static gboolean @@ -1083,7 +611,7 @@ plugin_init (GstPlugin * plugin) GST_DEBUG_CATEGORY_INIT (mpeg4v_parse_debug, "mpeg4videoparse", 0, "MPEG-4 video parser"); - if (!gst_element_register (plugin, "mpeg4videoparse", GST_RANK_SECONDARY, + if (!gst_element_register (plugin, "mpeg4videoparse", GST_RANK_PRIMARY + 1, gst_mpeg4vparse_get_type ())) return FALSE; diff --git a/gst/mpeg4videoparse/mpeg4videoparse.h b/gst/mpeg4videoparse/mpeg4videoparse.h index 29f7fa1..05d81e8 100644 --- a/gst/mpeg4videoparse/mpeg4videoparse.h +++ b/gst/mpeg4videoparse/mpeg4videoparse.h @@ -21,7 +21,9 @@ #define __MPEG4VIDEOPARSE_H__ #include <gst/gst.h> -#include <gst/base/gstadapter.h> +#include <gst/base/gstbaseparse.h> + +#include "mpeg4parse.h" G_BEGIN_DECLS @@ -40,47 +42,30 @@ G_BEGIN_DECLS typedef struct _GstMpeg4VParse GstMpeg4VParse; typedef struct _GstMpeg4VParseClass GstMpeg4VParseClass; -typedef enum { - PARSE_NEED_START, - PARSE_START_FOUND, - PARSE_VO_FOUND, - PARSE_VOS_FOUND, - PARSE_VOP_FOUND -} GstMpeg4VParseState; - struct _GstMpeg4VParse { - GstElement element; - - GstPad * sinkpad; - GstPad * srcpad; - - GstCaps *sink_caps; + GstBaseParse element; - guint interval; GstClockTime last_report; - GstAdapter * adapter; - guint offset; - guint vos_offset; + /* parse state */ + gint last_sc; + gint vop_offset; + gint 
vos_offset; + gint vo_offset; gboolean intra_frame; - - GstMpeg4VParseState state; - GstClockTime timestamp; + gboolean update_caps; GstBuffer *config; - gboolean have_config; guint8 profile; - GstClockTime frame_duration; + MPEG4Params params; + /* properties */ gboolean drop; - - gboolean have_src_caps; - GstEvent *pending_segment; - GList *pending_events; + guint interval; }; struct _GstMpeg4VParseClass { - GstElementClass parent_class; + GstBaseParseClass parent_class; }; GType gst_mpeg4vparse_get_type (void); diff --git a/gst/mpegdemux/gstmpegdemux.c b/gst/mpegdemux/gstmpegdemux.c index ca2bac4..b575ed4 100644 --- a/gst/mpegdemux/gstmpegdemux.c +++ b/gst/mpegdemux/gstmpegdemux.c @@ -60,6 +60,8 @@ #define SEGMENT_THRESHOLD (300*GST_MSECOND) #define VIDEO_SEGMENT_THRESHOLD (500*GST_MSECOND) +#define DURATION_SCAN_LIMIT 4 * 1024 * 1024 + typedef enum { SCAN_SCR, @@ -154,9 +156,9 @@ static GstStateChangeReturn gst_flups_demux_change_state (GstElement * element, GstStateChange transition); static inline gboolean gst_flups_demux_scan_forward_ts (GstFluPSDemux * demux, - guint64 * pos, SCAN_MODE mode, guint64 * rts); + guint64 * pos, SCAN_MODE mode, guint64 * rts, gint limit); static inline gboolean gst_flups_demux_scan_backward_ts (GstFluPSDemux * demux, - guint64 * pos, SCAN_MODE mode, guint64 * rts); + guint64 * pos, SCAN_MODE mode, guint64 * rts, gint limit); static inline void gst_flups_demux_send_segment_updates (GstFluPSDemux * demux, GstClockTime new_time); @@ -1034,22 +1036,26 @@ gst_flups_demux_do_seek (GstFluPSDemux * demux, GstSegment * seeksegment) GST_INFO_OBJECT (demux, "sink segment configured %" GST_SEGMENT_FORMAT ", trying to go at SCR: %" G_GUINT64_FORMAT, &demux->sink_segment, scr); - offset = MIN (gst_util_uint64_scale (scr, scr_rate_n, scr_rate_d), - demux->sink_segment.stop); + offset = + MIN (gst_util_uint64_scale (scr - demux->first_scr, scr_rate_n, + scr_rate_d), demux->sink_segment.stop); - found = gst_flups_demux_scan_forward_ts (demux, 
&offset, SCAN_SCR, &fscr); + found = gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_SCR, &fscr, 0); if (!found) { - found = gst_flups_demux_scan_backward_ts (demux, &offset, SCAN_SCR, &fscr); + found = + gst_flups_demux_scan_backward_ts (demux, &offset, SCAN_SCR, &fscr, 0); } while (found && fscr < scr) { offset++; - found = gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_SCR, &fscr); + found = + gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_SCR, &fscr, 0); } while (found && fscr > scr && offset > 0) { offset--; - found = gst_flups_demux_scan_backward_ts (demux, &offset, SCAN_SCR, &fscr); + found = + gst_flups_demux_scan_backward_ts (demux, &offset, SCAN_SCR, &fscr, 0); } GST_INFO_OBJECT (demux, "doing seek at offset %" G_GUINT64_FORMAT @@ -1598,7 +1604,7 @@ gst_flups_demux_parse_pack_start (GstFluPSDemux * demux) /* adjustment of the SCR */ if (G_LIKELY (demux->current_scr != G_MAXUINT64)) { - gint64 diff; + guint64 diff; guint64 old_scr, old_mux_rate, bss, adjust = 0; /* keep SCR of the previous packet */ @@ -2368,7 +2374,7 @@ beach: static inline gboolean gst_flups_demux_scan_forward_ts (GstFluPSDemux * demux, guint64 * pos, - SCAN_MODE mode, guint64 * rts) + SCAN_MODE mode, guint64 * rts, gint limit) { GstFlowReturn ret = GST_FLOW_OK; GstBuffer *buffer = NULL; @@ -2384,6 +2390,9 @@ gst_flups_demux_scan_forward_ts (GstFluPSDemux * demux, guint64 * pos, if (offset + scan_sz > demux->sink_segment.stop) return FALSE; + if (limit && offset > *pos + limit) + return FALSE; + if (offset + to_read > demux->sink_segment.stop) to_read = demux->sink_segment.stop - offset; @@ -2415,7 +2424,7 @@ gst_flups_demux_scan_forward_ts (GstFluPSDemux * demux, guint64 * pos, static inline gboolean gst_flups_demux_scan_backward_ts (GstFluPSDemux * demux, guint64 * pos, - SCAN_MODE mode, guint64 * rts) + SCAN_MODE mode, guint64 * rts, gint limit) { GstFlowReturn ret = GST_FLOW_OK; GstBuffer *buffer = NULL; @@ -2431,6 +2440,9 @@ gst_flups_demux_scan_backward_ts 
(GstFluPSDemux * demux, guint64 * pos, if (offset < scan_sz - 1) return FALSE; + if (limit && offset < *pos - limit) + return FALSE; + if (offset > BLOCK_SZ) offset -= BLOCK_SZ; else { @@ -2496,7 +2508,8 @@ gst_flups_sink_get_duration (GstFluPSDemux * demux) /* Scan for notorious SCR and PTS to calculate the duration */ /* scan for first SCR in the stream */ offset = demux->sink_segment.start; - gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_SCR, &demux->first_scr); + gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_SCR, &demux->first_scr, + DURATION_SCAN_LIMIT); GST_DEBUG_OBJECT (demux, "First SCR: %" G_GINT64_FORMAT " %" GST_TIME_FORMAT " in packet starting at %" G_GUINT64_FORMAT, demux->first_scr, GST_TIME_ARGS (MPEGTIME_TO_GSTTIME (demux->first_scr)), @@ -2504,7 +2517,8 @@ gst_flups_sink_get_duration (GstFluPSDemux * demux) demux->first_scr_offset = offset; /* scan for last SCR in the stream */ offset = demux->sink_segment.stop; - gst_flups_demux_scan_backward_ts (demux, &offset, SCAN_SCR, &demux->last_scr); + gst_flups_demux_scan_backward_ts (demux, &offset, SCAN_SCR, + &demux->last_scr, 0); GST_DEBUG_OBJECT (demux, "Last SCR: %" G_GINT64_FORMAT " %" GST_TIME_FORMAT " in packet starting at %" G_GUINT64_FORMAT, demux->last_scr, GST_TIME_ARGS (MPEGTIME_TO_GSTTIME (demux->last_scr)), @@ -2512,18 +2526,22 @@ gst_flups_sink_get_duration (GstFluPSDemux * demux) demux->last_scr_offset = offset; /* scan for first PTS in the stream */ offset = demux->sink_segment.start; - gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_PTS, &demux->first_pts); + gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_PTS, &demux->first_pts, + DURATION_SCAN_LIMIT); GST_DEBUG_OBJECT (demux, "First PTS: %" G_GINT64_FORMAT " %" GST_TIME_FORMAT " in packet starting at %" G_GUINT64_FORMAT, demux->first_pts, GST_TIME_ARGS (MPEGTIME_TO_GSTTIME (demux->first_pts)), offset); - /* scan for last PTS in the stream */ - offset = demux->sink_segment.stop; - 
gst_flups_demux_scan_backward_ts (demux, &offset, SCAN_PTS, &demux->last_pts); - GST_DEBUG_OBJECT (demux, "Last PTS: %" G_GINT64_FORMAT " %" GST_TIME_FORMAT - " in packet starting at %" G_GUINT64_FORMAT, - demux->last_pts, GST_TIME_ARGS (MPEGTIME_TO_GSTTIME (demux->last_pts)), - offset); + if (demux->first_pts != G_MAXUINT64) { + /* scan for last PTS in the stream */ + offset = demux->sink_segment.stop; + gst_flups_demux_scan_backward_ts (demux, &offset, SCAN_PTS, + &demux->last_pts, DURATION_SCAN_LIMIT); + GST_DEBUG_OBJECT (demux, + "Last PTS: %" G_GINT64_FORMAT " %" GST_TIME_FORMAT + " in packet starting at %" G_GUINT64_FORMAT, demux->last_pts, + GST_TIME_ARGS (MPEGTIME_TO_GSTTIME (demux->last_pts)), offset); + } /* Detect wrong SCR values */ if (demux->first_scr > demux->last_scr) { GST_DEBUG_OBJECT (demux, "Wrong SCR values detected, searching for " @@ -2531,7 +2549,7 @@ gst_flups_sink_get_duration (GstFluPSDemux * demux) offset = demux->first_scr_offset; for (i = 0; i < 10; i++) { offset++; - gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_SCR, &scr); + gst_flups_demux_scan_forward_ts (demux, &offset, SCAN_SCR, &scr, 0); if (scr < demux->last_scr) { demux->first_scr = scr; demux->first_scr_offset = offset; diff --git a/gst/mpegdemux/gstmpegdemux.h b/gst/mpegdemux/gstmpegdemux.h index a882235..6cf7aed 100644 --- a/gst/mpegdemux/gstmpegdemux.h +++ b/gst/mpegdemux/gstmpegdemux.h @@ -84,7 +84,6 @@ struct _GstFluPSStream gint id; gint type; - gint size_bound; GstClockTime segment_thresh; GstClockTime last_seg_start; diff --git a/gst/mpegdemux/gstpesfilter.c b/gst/mpegdemux/gstpesfilter.c index 736d496..55bc6d7 100644 --- a/gst/mpegdemux/gstpesfilter.c +++ b/gst/mpegdemux/gstpesfilter.c @@ -168,7 +168,7 @@ gst_pes_filter_parse (GstPESFilter * filter) avail = MIN (avail, filter->length + 6); } - if (avail < 7) + if (avail < 6) goto need_more_data; /* read more data, either the whole packet if there is a length @@ -202,6 +202,8 @@ gst_pes_filter_parse 
(GstPESFilter * filter) break; } + if (datalen == 0) + goto need_more_data; filter->pts = filter->dts = -1; /* stuffing bits, first two bits are '10' for mpeg2 pes so this code is diff --git a/gst/mpegvideoparse/mpegvideoparse.c b/gst/mpegvideoparse/mpegvideoparse.c index 70fdc89..92e38a1 100644 --- a/gst/mpegvideoparse/mpegvideoparse.c +++ b/gst/mpegvideoparse/mpegvideoparse.c @@ -1022,10 +1022,10 @@ gst_mpegvideoparse_change_state (GstElement * element, static gboolean plugin_init (GstPlugin * plugin) { - GST_DEBUG_CATEGORY_INIT (mpv_parse_debug, "mpegvideoparse", 0, + GST_DEBUG_CATEGORY_INIT (mpv_parse_debug, "legacympegvideoparse", 0, "MPEG Video Parser"); - return gst_element_register (plugin, "mpegvideoparse", + return gst_element_register (plugin, "legacympegvideoparse", GST_RANK_PRIMARY, GST_TYPE_MPEGVIDEOPARSE); } diff --git a/gst/videoparsers/Makefile.am b/gst/videoparsers/Makefile.am index aca5033..bb61c1c 100644 --- a/gst/videoparsers/Makefile.am +++ b/gst/videoparsers/Makefile.am @@ -2,18 +2,26 @@ plugin_LTLIBRARIES = libgstvideoparsersbad.la libgstvideoparsersbad_la_SOURCES = plugin.c \ h263parse.c gsth263parse.c \ - gsth264parse.c h264parse.c \ - gstdiracparse.c dirac_parse.c + gstdiracparse.c dirac_parse.c \ + gsth264parse.c gstmpegvideoparse.c \ + mpegvideoparse.c \ + gstvc1parse.c + libgstvideoparsersbad_la_CFLAGS = \ + $(GST_PLUGINS_BAD_CFLAGS) $(GST_PLUGINS_BASE_CFLAGS) \ + -DGST_USE_UNSTABLE_API \ $(GST_BASE_CFLAGS) $(GST_CFLAGS) -libgstvideoparsersbad_la_LIBADD = \ +libgstvideoparsersbad_la_LIBADD = $(GST_PLUGINS_BASE_LIBS) \ + $(top_builddir)/gst-libs/gst/codecparsers/libgstcodecparsers-$(GST_MAJORMINOR).la \ $(GST_BASE_LIBS) $(GST_LIBS) libgstvideoparsersbad_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS) libgstvideoparsersbad_la_LIBTOOLFLAGS = --tag=disable-static noinst_HEADERS = gsth263parse.h h263parse.h \ - gsth264parse.h h264parse.h \ - gstdiracparse.h dirac_parse.h + gstdiracparse.h dirac_parse.h \ + gsth264parse.h \ + gstmpegvideoparse.h 
mpegvideoparse.h \ + gstvc1parse.h Android.mk: Makefile.am $(BUILT_SOURCES) androgenizer \ diff --git a/gst/videoparsers/gsth264parse.c b/gst/videoparsers/gsth264parse.c index 8b996fe..cccd7cc 100644 --- a/gst/videoparsers/gsth264parse.c +++ b/gst/videoparsers/gsth264parse.c @@ -1,7 +1,10 @@ /* GStreamer H.264 Parser - * Copyright (C) <2010> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> - * Copyright (C) <2010> Collabora Multimedia + * Copyright (C) <2010> Collabora ltd * Copyright (C) <2010> Nokia Corporation + * Copyright (C) <2011> Intel Corporation + * + * Copyright (C) <2010> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> + * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public @@ -33,13 +36,12 @@ GST_DEBUG_CATEGORY (h264_parse_debug); #define GST_CAT_DEFAULT h264_parse_debug -#define DEFAULT_SPLIT_PACKETIZED FALSE #define DEFAULT_CONFIG_INTERVAL (0) +#define GST_BUFFER_FLAG_B_FRAME (GST_BUFFER_FLAG_LAST << 0) enum { PROP_0, - PROP_SPLIT_PACKETIZED, PROP_CONFIG_INTERVAL, PROP_LAST }; @@ -61,14 +63,17 @@ enum static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, - GST_STATIC_CAPS ("video/x-h264, parsed = (boolean) false")); + GST_STATIC_CAPS ("video/x-h264")); static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS, - GST_STATIC_CAPS ("video/x-h264, parsed = (boolean) true")); + GST_STATIC_CAPS ("video/x-h264, parsed = (boolean) true, " + "stream-format=(string) { avc, byte-stream }, " + "alignment=(string) { au, nal }")); -GST_BOILERPLATE (GstH264Parse, gst_h264_parse, GstElement, GST_TYPE_BASE_PARSE); +GST_BOILERPLATE (GstH264Parse, gst_h264_parse, GstBaseParse, + GST_TYPE_BASE_PARSE); static void gst_h264_parse_finalize (GObject * object); @@ -88,6 +93,7 @@ static void gst_h264_parse_get_property (GObject * 
object, guint prop_id, static gboolean gst_h264_parse_set_caps (GstBaseParse * parse, GstCaps * caps); static GstFlowReturn gst_h264_parse_chain (GstPad * pad, GstBuffer * buffer); +static gboolean gst_h264_parse_event (GstBaseParse * parse, GstEvent * event); static void gst_h264_parse_base_init (gpointer g_class) @@ -100,7 +106,7 @@ gst_h264_parse_base_init (gpointer g_class) gst_static_pad_template_get (&sinktemplate)); gst_element_class_set_details_simple (gstelement_class, "H.264 parser", - "Codec/Parser/Video", + "Codec/Parser/Converter/Video", "Parses H.264 streams", "Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>"); @@ -117,10 +123,6 @@ gst_h264_parse_class_init (GstH264ParseClass * klass) gobject_class->set_property = gst_h264_parse_set_property; gobject_class->get_property = gst_h264_parse_get_property; - g_object_class_install_property (gobject_class, PROP_SPLIT_PACKETIZED, - g_param_spec_boolean ("split-packetized", "Split packetized", - "Split NAL units of packetized streams", DEFAULT_SPLIT_PACKETIZED, - G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_CONFIG_INTERVAL, g_param_spec_uint ("config-interval", "SPS PPS Send Interval", @@ -138,6 +140,7 @@ gst_h264_parse_class_init (GstH264ParseClass * klass) parse_class->pre_push_frame = GST_DEBUG_FUNCPTR (gst_h264_parse_pre_push_frame); parse_class->set_sink_caps = GST_DEBUG_FUNCPTR (gst_h264_parse_set_caps); + parse_class->event = GST_DEBUG_FUNCPTR (gst_h264_parse_event); } static void @@ -168,14 +171,22 @@ gst_h264_parse_finalize (GObject * object) static void gst_h264_parse_reset_frame (GstH264Parse * h264parse) { + GST_DEBUG_OBJECT (h264parse, "reset frame"); + /* done parsing; reset state */ - h264parse->last_nal_pos = 0; - h264parse->next_sc_pos = 0; + h264parse->nalu.valid = FALSE; + h264parse->nalu.offset = 0; + h264parse->nalu.size = 0; + h264parse->current_off = 0; + h264parse->picture_start = FALSE; h264parse->update_caps 
= FALSE; h264parse->idr_pos = -1; + h264parse->sei_pos = -1; h264parse->keyframe = FALSE; h264parse->frame_start = FALSE; + h264parse->b_frame = FALSE; + gst_adapter_clear (h264parse->frame_out); } static void @@ -185,6 +196,9 @@ gst_h264_parse_reset (GstH264Parse * h264parse) h264parse->height = 0; h264parse->fps_num = 0; h264parse->fps_den = 0; + h264parse->aspect_ratio_idc = 0; + h264parse->sar_width = 0; + h264parse->sar_height = 0; gst_buffer_replace (&h264parse->codec_data, NULL); h264parse->nal_length_size = 4; h264parse->packetized = FALSE; @@ -194,6 +208,12 @@ gst_h264_parse_reset (GstH264Parse * h264parse) h264parse->last_report = GST_CLOCK_TIME_NONE; h264parse->push_codec = FALSE; + h264parse->have_pps = FALSE; + h264parse->have_sps = FALSE; + + h264parse->dts = GST_CLOCK_TIME_NONE; + h264parse->ts_trn_nb = GST_CLOCK_TIME_NONE; + h264parse->do_ts = TRUE; gst_h264_parse_reset_frame (h264parse); } @@ -203,12 +223,18 @@ gst_h264_parse_start (GstBaseParse * parse) { GstH264Parse *h264parse = GST_H264_PARSE (parse); - GST_DEBUG ("Start"); + GST_DEBUG_OBJECT (parse, "start"); gst_h264_parse_reset (h264parse); - gst_h264_params_create (&h264parse->params, GST_ELEMENT (h264parse)); + h264parse->nalparser = gst_h264_nal_parser_new (); + + h264parse->dts = GST_CLOCK_TIME_NONE; + h264parse->ts_trn_nb = GST_CLOCK_TIME_NONE; + h264parse->sei_pic_struct_pres_flag = FALSE; + h264parse->sei_pic_struct = 0; + h264parse->field_pic_flag = 0; - gst_base_parse_set_min_frame_size (parse, 512); + gst_base_parse_set_min_frame_size (parse, 6); return TRUE; } @@ -216,13 +242,18 @@ gst_h264_parse_start (GstBaseParse * parse) static gboolean gst_h264_parse_stop (GstBaseParse * parse) { + guint i; GstH264Parse *h264parse = GST_H264_PARSE (parse); - GST_DEBUG ("Stop"); + GST_DEBUG_OBJECT (parse, "stop"); gst_h264_parse_reset (h264parse); - gst_h264_params_free (h264parse->params); - h264parse->params = NULL; + for (i = 0; i < GST_H264_MAX_SPS_COUNT; i++) + gst_buffer_replace 
(&h264parse->sps_nals[i], NULL); + for (i = 0; i < GST_H264_MAX_PPS_COUNT; i++) + gst_buffer_replace (&h264parse->pps_nals[i], NULL); + + gst_h264_nal_parser_free (h264parse->nalparser); return TRUE; } @@ -251,44 +282,80 @@ gst_h264_parse_get_string (GstH264Parse * parse, gboolean format, gint code) } } -/* check downstream caps to configure format and alignment */ static void -gst_h264_parse_negotiate (GstH264Parse * h264parse) +gst_h264_parse_format_from_caps (GstCaps * caps, guint * format, guint * align) { - GstCaps *caps; - guint format = GST_H264_PARSE_FORMAT_NONE; - guint align = GST_H264_PARSE_ALIGN_NONE; + g_return_if_fail (gst_caps_is_fixed (caps)); - caps = gst_pad_get_allowed_caps (GST_BASE_PARSE_SRC_PAD (h264parse)); - GST_DEBUG_OBJECT (h264parse, "allowed caps: %" GST_PTR_FORMAT, caps); + GST_DEBUG ("parsing caps: %" GST_PTR_FORMAT, caps); + + if (format) + *format = GST_H264_PARSE_FORMAT_NONE; + + if (align) + *align = GST_H264_PARSE_ALIGN_NONE; if (caps && gst_caps_get_size (caps) > 0) { GstStructure *s = gst_caps_get_structure (caps, 0); const gchar *str = NULL; - if ((str = gst_structure_get_string (s, "stream-format"))) { - if (strcmp (str, "avc") == 0) { - format = GST_H264_PARSE_FORMAT_AVC; - } else if (strcmp (str, "byte-stream") == 0) { - format = GST_H264_PARSE_FORMAT_BYTE; - } else { - GST_DEBUG_OBJECT (h264parse, "unknown stream-format: %s", str); + if (format) { + if ((str = gst_structure_get_string (s, "stream-format"))) { + if (strcmp (str, "avc") == 0) + *format = GST_H264_PARSE_FORMAT_AVC; + else if (strcmp (str, "byte-stream") == 0) + *format = GST_H264_PARSE_FORMAT_BYTE; } } - if ((str = gst_structure_get_string (s, "alignment"))) { - if (strcmp (str, "au") == 0) { - align = GST_H264_PARSE_ALIGN_AU; - } else if (strcmp (str, "nal") == 0) { - align = GST_H264_PARSE_ALIGN_NAL; - } else { - GST_DEBUG_OBJECT (h264parse, "unknown alignment: %s", str); + if (align) { + if ((str = gst_structure_get_string (s, "alignment"))) { + if (strcmp 
(str, "au") == 0) + *align = GST_H264_PARSE_ALIGN_AU; + else if (strcmp (str, "nal") == 0) + *align = GST_H264_PARSE_ALIGN_NAL; } } } +} - if (caps) +/* check downstream caps to configure format and alignment */ +static void +gst_h264_parse_negotiate (GstH264Parse * h264parse, GstCaps * in_caps) +{ + GstCaps *caps; + guint format = GST_H264_PARSE_FORMAT_NONE; + guint align = GST_H264_PARSE_ALIGN_NONE; + + g_return_if_fail ((in_caps == NULL) || gst_caps_is_fixed (in_caps)); + + caps = gst_pad_get_allowed_caps (GST_BASE_PARSE_SRC_PAD (h264parse)); + GST_DEBUG_OBJECT (h264parse, "allowed caps: %" GST_PTR_FORMAT, caps); + + /* concentrate on leading structure, since decodebin2 parser + * capsfilter always includes parser template caps */ + if (caps) { + caps = gst_caps_make_writable (caps); + gst_caps_truncate (caps); + GST_DEBUG_OBJECT (h264parse, "negotiating with caps: %" GST_PTR_FORMAT, + caps); + } + + if (in_caps && caps) { + if (gst_caps_can_intersect (in_caps, caps)) { + GST_DEBUG_OBJECT (h264parse, "downstream accepts upstream caps"); + gst_h264_parse_format_from_caps (in_caps, &format, &align); + gst_caps_unref (caps); + caps = NULL; + } + } + + if (caps) { + /* fixate to avoid ambiguity with lists when parsing */ + gst_pad_fixate_caps (GST_BASE_PARSE_SRC_PAD (h264parse), caps); + gst_h264_parse_format_from_caps (caps, &format, &align); gst_caps_unref (caps); + } /* default */ if (!format) @@ -309,13 +376,18 @@ gst_h264_parse_wrap_nal (GstH264Parse * h264parse, guint format, guint8 * data, guint size) { GstBuffer *buf; - const guint nl = h264parse->nal_length_size; + guint nl = h264parse->nal_length_size; + + GST_DEBUG_OBJECT (h264parse, "nal length %d", size); buf = gst_buffer_new_and_alloc (size + nl + 4); if (format == GST_H264_PARSE_FORMAT_AVC) { GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf), size << (32 - 8 * nl)); } else { - g_assert (nl == 4); + /* HACK: nl should always be 4 here, otherwise this won't work. 
+ * There are legit cases where nl in avc stream is 2, but byte-stream + * SC is still always 4 bytes. */ + nl = 4; GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf), 1); } @@ -325,60 +397,193 @@ gst_h264_parse_wrap_nal (GstH264Parse * h264parse, guint format, guint8 * data, return buf; } +static void +gst_h264_parser_store_nal (GstH264Parse * h264parse, guint id, + GstH264NalUnitType naltype, GstH264NalUnit * nalu) +{ + GstBuffer *buf, **store; + guint size = nalu->size, store_size; + + if (naltype == GST_H264_NAL_SPS) { + store_size = GST_H264_MAX_SPS_COUNT; + store = h264parse->sps_nals; + GST_DEBUG_OBJECT (h264parse, "storing sps %u", id); + } else if (naltype == GST_H264_NAL_PPS) { + store_size = GST_H264_MAX_PPS_COUNT; + store = h264parse->pps_nals; + GST_DEBUG_OBJECT (h264parse, "storing pps %u", id); + } else + return; + + if (id >= store_size) { + GST_DEBUG_OBJECT (h264parse, "unable to store nal, id out-of-range %d", id); + return; + } + + buf = gst_buffer_new_and_alloc (size); + memcpy (GST_BUFFER_DATA (buf), nalu->data + nalu->offset, size); + + if (store[id]) + gst_buffer_unref (store[id]); + + store[id] = buf; +} + /* SPS/PPS/IDR considered key, all others DELTA; * so downstream waiting for keyframe can pick up at SPS/PPS/IDR */ #define NAL_TYPE_IS_KEY(nt) (((nt) == 5) || ((nt) == 7) || ((nt) == 8)) /* caller guarantees 2 bytes of nal payload */ static void -gst_h264_parse_process_nal (GstH264Parse * h264parse, guint8 * data, - gint sc_pos, gint nal_pos, guint nal_size) +gst_h264_parse_process_nal (GstH264Parse * h264parse, GstH264NalUnit * nalu) { guint nal_type; + GstH264SliceHdr slice; + GstH264PPS pps; + GstH264SPS sps; + GstH264SEIMessage sei; - g_return_if_fail (nal_size >= 2); - g_return_if_fail (nal_pos - sc_pos > 0 && nal_pos - sc_pos <= 4); + gboolean slcparsed = FALSE; + GstH264NalParser *nalparser = h264parse->nalparser; - /* lower layer collects params */ - gst_h264_params_parse_nal (h264parse->params, data + nal_pos, nal_size); + + /* nothing 
to do for broken input */ + if (G_UNLIKELY (nalu->size < 2)) { + GST_DEBUG_OBJECT (h264parse, "not processing nal size %u", nalu->size); + return; + } /* we have a peek as well */ - nal_type = data[nal_pos] & 0x1f; + nal_type = nalu->type; h264parse->keyframe |= NAL_TYPE_IS_KEY (nal_type); + GST_DEBUG_OBJECT (h264parse, "processing nal of type %u, size %u", + nal_type, nalu->size); + switch (nal_type) { - case NAL_SPS: - case NAL_PPS: + case GST_H264_NAL_SPS: + gst_h264_parser_parse_sps (nalparser, nalu, &sps, TRUE); + + GST_DEBUG_OBJECT (h264parse, "triggering src caps check"); + h264parse->update_caps = TRUE; + h264parse->have_sps = TRUE; + if (h264parse->push_codec && h264parse->have_pps) { + /* SPS and PPS found in stream before the first pre_push_frame, no need + * to forcibly push at start */ + GST_INFO_OBJECT (h264parse, "have SPS/PPS in stream"); + h264parse->push_codec = FALSE; + h264parse->have_sps = FALSE; + h264parse->have_pps = FALSE; + } + + gst_h264_parser_store_nal (h264parse, sps.id, nal_type, nalu); + break; + case GST_H264_NAL_PPS: + gst_h264_parser_parse_pps (nalparser, nalu, &pps); /* parameters might have changed, force caps check */ GST_DEBUG_OBJECT (h264parse, "triggering src caps check"); h264parse->update_caps = TRUE; - /* found in stream, no need to forcibly push at start */ - h264parse->push_codec = FALSE; + h264parse->have_pps = TRUE; + if (h264parse->push_codec && h264parse->have_sps) { + /* SPS and PPS found in stream before the first pre_push_frame, no need + * to forcibly push at start */ + GST_INFO_OBJECT (h264parse, "have SPS/PPS in stream"); + h264parse->push_codec = FALSE; + h264parse->have_sps = FALSE; + h264parse->have_pps = FALSE; + } + + gst_h264_parser_store_nal (h264parse, pps.id, nal_type, nalu); + break; + case GST_H264_NAL_SEI: + gst_h264_parser_parse_sei (nalparser, nalu, &sei); + switch (sei.payloadType) { + case GST_H264_SEI_PIC_TIMING: + h264parse->sei_pic_struct_pres_flag = + sei.pic_timing.pic_struct_present_flag; 
+ h264parse->sei_cpb_removal_delay = sei.pic_timing.cpb_removal_delay; + if (h264parse->sei_pic_struct_pres_flag) + h264parse->sei_pic_struct = sei.pic_timing.pic_struct; + break; + case GST_H264_SEI_BUF_PERIOD: + if (h264parse->ts_trn_nb == GST_CLOCK_TIME_NONE || + h264parse->dts == GST_CLOCK_TIME_NONE) + h264parse->ts_trn_nb = 0; + else + h264parse->ts_trn_nb = h264parse->dts; + + GST_LOG_OBJECT (h264parse, + "new buffering period; ts_trn_nb updated: %" GST_TIME_FORMAT, + GST_TIME_ARGS (h264parse->ts_trn_nb)); + break; + } + /* mark SEI pos */ + if (h264parse->sei_pos == -1) { + if (h264parse->format == GST_H264_PARSE_FORMAT_AVC) + h264parse->sei_pos = gst_adapter_available (h264parse->frame_out); + else + h264parse->sei_pos = nalu->offset - 4; + GST_DEBUG_OBJECT (h264parse, "marking SEI in frame at offset %d", + h264parse->sei_pos); + } break; - case NAL_SLICE: - case NAL_SLICE_DPA: - case NAL_SLICE_DPB: - case NAL_SLICE_DPC: + + case GST_H264_NAL_SLICE: + case GST_H264_NAL_SLICE_DPA: + case GST_H264_NAL_SLICE_DPB: + case GST_H264_NAL_SLICE_DPC: + slcparsed = TRUE; + if (gst_h264_parser_parse_slice_hdr (nalparser, nalu, + &slice, FALSE, FALSE) == GST_H264_PARSER_ERROR) + return; + /* real frame data */ - h264parse->frame_start |= (h264parse->params->first_mb_in_slice == 0); + h264parse->frame_start |= (slice.first_mb_in_slice == 0); + if (slice.type == GST_H264_B_SLICE || slice.type == GST_H264_S_B_SLICE) + h264parse->b_frame = TRUE; + else + h264parse->b_frame = FALSE; /* if we need to sneak codec NALs into the stream, * this is a good place, so fake it as IDR * (which should be at start anyway) */ + GST_DEBUG_OBJECT (h264parse, "frame start: %i first_mb_in_slice %i", + h264parse->frame_start, slice.first_mb_in_slice); if (G_LIKELY (!h264parse->push_codec)) break; /* fall-through */ - case NAL_SLICE_IDR: + case GST_H264_NAL_SLICE_IDR: + if (!slcparsed) { + if (gst_h264_parser_parse_slice_hdr (nalparser, nalu, + &slice, FALSE, FALSE) == GST_H264_PARSER_ERROR) + 
return; + GST_DEBUG_OBJECT (h264parse, "frame start: %i first_mb_in_slice %i", + h264parse->frame_start, slice.first_mb_in_slice); + } /* real frame data */ - h264parse->frame_start |= (h264parse->params->first_mb_in_slice == 0); + h264parse->frame_start |= (slice.first_mb_in_slice == 0); + /* mark where config needs to go if interval expired */ /* mind replacement buffer if applicable */ - if (h264parse->format == GST_H264_PARSE_FORMAT_AVC) - h264parse->idr_pos = gst_adapter_available (h264parse->frame_out); - else - h264parse->idr_pos = sc_pos; - GST_DEBUG_OBJECT (h264parse, "marking IDR in frame at offset %d", - h264parse->idr_pos); + if (h264parse->idr_pos == -1) { + if (h264parse->format == GST_H264_PARSE_FORMAT_AVC) + h264parse->idr_pos = gst_adapter_available (h264parse->frame_out); + else + h264parse->idr_pos = nalu->offset - 4; + GST_DEBUG_OBJECT (h264parse, "marking IDR in frame at offset %d", + h264parse->idr_pos); + } + + GST_DEBUG_OBJECT (h264parse, "first MB: %u, slice type: %u", + slice.first_mb_in_slice, slice.type); + /* if SEI preceeds (faked) IDR, then we have to insert config there */ + if (h264parse->sei_pos >= 0 && h264parse->idr_pos > h264parse->sei_pos) { + h264parse->idr_pos = h264parse->sei_pos; + GST_DEBUG_OBJECT (h264parse, "moved IDR mark to SEI position %d", + h264parse->idr_pos); + } break; + default: + gst_h264_parser_parse_nal (nalparser, nalu); } /* if AVC output needed, collect properly prefixed nal in adapter, @@ -388,7 +593,7 @@ gst_h264_parse_process_nal (GstH264Parse * h264parse, guint8 * data, GST_LOG_OBJECT (h264parse, "collecting NAL in AVC frame"); buf = gst_h264_parse_wrap_nal (h264parse, h264parse->format, - data + nal_pos, nal_size); + nalu->data + nalu->offset, nalu->size); gst_adapter_push (h264parse->frame_out, buf); } } @@ -396,21 +601,31 @@ gst_h264_parse_process_nal (GstH264Parse * h264parse, guint8 * data, /* caller guarantees at least 2 bytes of nal payload for each nal * returns TRUE if next_nal indicates that 
nal terminates an AU */ static inline gboolean -gst_h264_parse_collect_nal (GstH264Parse * h264parse, guint8 * nal, - guint8 * next_nal) +gst_h264_parse_collect_nal (GstH264Parse * h264parse, const guint8 * data, + guint size, GstH264NalUnit * nalu) { - gint nal_type; gboolean complete; + GstH264ParserResult parse_res; + GstH264NalUnitType nal_type = nalu->type; + GstH264NalUnit nnalu; + + GST_DEBUG_OBJECT (h264parse, "parsing collected nal"); + parse_res = gst_h264_parser_identify_nalu (h264parse->nalparser, data, + nalu->offset + nalu->size, size, &nnalu); - if (h264parse->align == GST_H264_PARSE_ALIGN_NAL) + if (parse_res == GST_H264_PARSER_ERROR) + return FALSE; + + if (h264parse->align == GST_H264_PARSE_ALIGN_NAL) { return TRUE; + } /* determine if AU complete */ - nal_type = nal[0] & 0x1f; GST_LOG_OBJECT (h264parse, "nal type: %d", nal_type); /* coded slice NAL starts a picture, * i.e. other types become aggregated in front of it */ - h264parse->picture_start |= (nal_type == 1 || nal_type == 2 || nal_type == 5); + h264parse->picture_start |= (nal_type == GST_H264_NAL_SLICE || + nal_type == GST_H264_NAL_SLICE_DPA || nal_type == GST_H264_NAL_SLICE_IDR); /* consider a coded slices (IDR or not) to start a picture, * (so ending the previous one) if first_mb_in_slice == 0 @@ -419,34 +634,26 @@ gst_h264_parse_collect_nal (GstH264Parse * h264parse, guint8 * nal, * but in practice it works in sane cases, needs not much parsing, * and also works with broken frame_num in NAL * (where spec-wise would fail) */ - nal_type = next_nal[0] & 0x1f; + nal_type = nnalu.type; + complete = h264parse->picture_start && (nal_type >= GST_H264_NAL_SEI && + nal_type <= GST_H264_NAL_AU_DELIMITER); + GST_LOG_OBJECT (h264parse, "next nal type: %d", nal_type); - complete = h264parse->picture_start && (nal_type >= 6 && nal_type <= 9); complete |= h264parse->picture_start && - (nal_type == 1 || nal_type == 2 || nal_type == 5) && + (nal_type == GST_H264_NAL_SLICE || + nal_type == 
GST_H264_NAL_SLICE_DPA || + nal_type == GST_H264_NAL_SLICE_IDR) && /* first_mb_in_slice == 0 considered start of frame */ - (next_nal[1] & 0x80); + (nnalu.data[nnalu.offset + 1] & 0x80); GST_LOG_OBJECT (h264parse, "au complete: %d", complete); return complete; } -/* finds next startcode == 00 00 01, along with a subsequent byte */ -static guint -gst_h264_parse_find_sc (GstBuffer * buffer, guint skip) -{ - GstByteReader br; - guint sc_pos = -1; - - gst_byte_reader_init_from_buffer (&br, buffer); - - /* NALU not empty, so we can at least expect 1 (even 2) bytes following sc */ - sc_pos = gst_byte_reader_masked_scan_uint32 (&br, 0xffffff00, 0x00000100, - skip, gst_byte_reader_get_remaining (&br) - skip); - - return sc_pos; -} +/* FIXME move into baseparse, or anything equivalent; + * see https://bugzilla.gnome.org/show_bug.cgi?id=650093 */ +#define GST_BASE_PARSE_FRAME_FLAG_PARSING 0x10000 static gboolean gst_h264_parse_check_valid_frame (GstBaseParse * parse, @@ -454,10 +661,11 @@ gst_h264_parse_check_valid_frame (GstBaseParse * parse, { GstH264Parse *h264parse = GST_H264_PARSE (parse); GstBuffer *buffer = frame->buffer; - gint sc_pos, nal_pos, next_sc_pos, next_nal_pos; guint8 *data; - guint size; + guint size, current_off = 0; gboolean drain; + GstH264NalParser *nalparser = h264parse->nalparser; + GstH264NalUnit nalu = h264parse->nalu; /* expect at least 3 bytes startcode == sc, and 2 bytes NALU payload */ if (G_UNLIKELY (GST_BUFFER_SIZE (buffer) < 5)) @@ -465,94 +673,177 @@ gst_h264_parse_check_valid_frame (GstBaseParse * parse, /* need to configure aggregation */ if (G_UNLIKELY (h264parse->format == GST_H264_PARSE_FORMAT_NONE)) - gst_h264_parse_negotiate (h264parse); + gst_h264_parse_negotiate (h264parse, NULL); + + /* avoid stale cached parsing state */ + if (!(frame->flags & GST_BASE_PARSE_FRAME_FLAG_PARSING)) { + GST_LOG_OBJECT (h264parse, "parsing new frame"); + gst_h264_parse_reset_frame (h264parse); + frame->flags |= GST_BASE_PARSE_FRAME_FLAG_PARSING; + } 
else { + GST_LOG_OBJECT (h264parse, "resuming frame parsing"); + } data = GST_BUFFER_DATA (buffer); size = GST_BUFFER_SIZE (buffer); - GST_LOG_OBJECT (h264parse, "last_nal_pos: %d, last_scan_pos %d", - h264parse->last_nal_pos, h264parse->next_sc_pos); + drain = FALSE; + current_off = h264parse->current_off; - nal_pos = h264parse->last_nal_pos; - next_sc_pos = h264parse->next_sc_pos; + g_assert (current_off < size); - if (!next_sc_pos) { - sc_pos = gst_h264_parse_find_sc (buffer, 0); + GST_DEBUG_OBJECT (h264parse, "last parse position %u", current_off); + while (TRUE) { + GstH264ParserResult pres; + +#if 0 + if (h264parse->packetized_chunked) + pres = + gst_h264_parser_identify_nalu_unchecked (nalparser, data, current_off, + size, &nalu); + else +#endif + pres = + gst_h264_parser_identify_nalu (nalparser, data, current_off, size, + &nalu); + + switch (pres) { + case GST_H264_PARSER_OK: + GST_DEBUG_OBJECT (h264parse, "complete nal found. " + "current offset: %u, Nal offset: %u, Nal Size: %u", + current_off, nalu.offset, nalu.size); + + GST_DEBUG_OBJECT (h264parse, "current off. 
%u", + nalu.offset + nalu.size); + if (!h264parse->nalu.size && !h264parse->nalu.valid) + h264parse->nalu = nalu; + + /* need 2 bytes of next nal */ + if (!h264parse->packetized_chunked && + (nalu.offset + nalu.size + 4 + 2 > size)) { + if (GST_BASE_PARSE_DRAINING (parse)) { + drain = TRUE; + } else { + GST_DEBUG_OBJECT (h264parse, "need more bytes of next nal"); + current_off = nalu.sc_offset; + goto more; + } + } else if (h264parse->packetized_chunked) { +#if 0 + /* normal next nal based collection not possible, + * _chain will have to tell us whether this was last one for AU */ + drain = h264parse->packetized_last; +#endif + } + break; + case GST_H264_PARSER_BROKEN_LINK: + return FALSE; + case GST_H264_PARSER_ERROR: + current_off = size - 3; + goto parsing_error; + case GST_H264_PARSER_NO_NAL: + /* don't expect to have found any NAL so far */ + g_assert (h264parse->nalu.size == 0); + current_off = h264parse->nalu.sc_offset = size - 3; + goto more; + case GST_H264_PARSER_BROKEN_DATA: + GST_WARNING_OBJECT (h264parse, "input stream is corrupt; " + "it contains a NAL unit of length %d", nalu.size); + + /* broken nal at start -> arrange to skip it, + * otherwise have it terminate current au + * (and so it will be skipped on next frame round) */ + if (nalu.sc_offset == h264parse->nalu.sc_offset) { + *skipsize = nalu.offset; + + GST_DEBUG_OBJECT (h264parse, "skipping broken nal"); + return FALSE; + } else { + nalu.size = 0; + goto end; + } + case GST_H264_PARSER_NO_NAL_END: + GST_DEBUG_OBJECT (h264parse, "not a complete nal found at offset %u", + nalu.offset); + + current_off = nalu.sc_offset; + /* We keep the reference to this nal so we start over the parsing + * here */ + if (!h264parse->nalu.size && !h264parse->nalu.valid) + h264parse->nalu = nalu; + + if (GST_BASE_PARSE_DRAINING (parse)) { + drain = TRUE; + GST_DEBUG_OBJECT (h264parse, "draining NAL %u %u %u", size, + h264parse->nalu.offset, h264parse->nalu.size); + /* Can't parse the nalu */ + if (size - 
h264parse->nalu.offset < 2) { + *skipsize = nalu.offset; + return FALSE; + } - if (sc_pos == -1) { - /* SC not found, need more data */ - sc_pos = GST_BUFFER_SIZE (buffer) - 3; - goto more; + /* We parse it anyway */ + nalu.size = size - nalu.offset; + break; + } + goto more; } - nal_pos = sc_pos + 3; - next_sc_pos = nal_pos; - /* sc might have 2 or 3 0-bytes */ - if (sc_pos > 0 && data[sc_pos - 1] == 00) - sc_pos--; - GST_LOG_OBJECT (h264parse, "found sc at offset %d", sc_pos); - } else { - /* previous checks already arrange sc at start */ - sc_pos = 0; - } + current_off = nalu.offset + nalu.size; - drain = GST_BASE_PARSE_DRAINING (parse); - while (TRUE) { - gint prev_sc_pos; - - next_sc_pos = gst_h264_parse_find_sc (buffer, next_sc_pos); - if (next_sc_pos == -1) { - GST_LOG_OBJECT (h264parse, "no next sc"); - if (drain) { - /* FLUSH/EOS, it's okay if we can't find the next frame */ - next_sc_pos = size; - next_nal_pos = size; - } else { - next_sc_pos = size - 3; - goto more; - } - } else { - next_nal_pos = next_sc_pos + 3; - if (data[next_sc_pos - 1] == 00) - next_sc_pos--; - GST_LOG_OBJECT (h264parse, "found next sc at offset %d", next_sc_pos); - /* need at least 1 more byte of next NAL */ - if (!drain && (next_nal_pos == size - 1)) - goto more; + GST_DEBUG_OBJECT (h264parse, "%p complete nal found. 
Off: %u, Size: %u", + data, nalu.offset, nalu.size); + + gst_h264_parse_process_nal (h264parse, &nalu); + + /* simulate no next nal if none needed */ + drain = drain || (h264parse->align == GST_H264_PARSE_ALIGN_NAL); + + /* In packetized mode we know there's only on NALU in each input packet, + * but we may not have seen the whole AU already, possibly need more */ + if (h264parse->packetized_chunked) { + if (drain) + break; +#if 0 + /* next NALU expected at end of current data */ + current_off = size; + goto more; +#endif } - /* determine nal's sc position */ - prev_sc_pos = nal_pos - 3; - g_assert (prev_sc_pos >= 0); - if (prev_sc_pos > 0 && data[prev_sc_pos - 1] == 0) - prev_sc_pos--; - - /* already consume and gather info from NAL */ - gst_h264_parse_process_nal (h264parse, data, prev_sc_pos, nal_pos, - next_sc_pos - nal_pos); - if (next_nal_pos >= size - 1 || - gst_h264_parse_collect_nal (h264parse, data + nal_pos, - data + next_nal_pos)) + /* if no next nal, we know it's complete here */ + if (drain || gst_h264_parse_collect_nal (h264parse, data, size, &nalu)) break; - /* move along */ - next_sc_pos = nal_pos = next_nal_pos; + GST_DEBUG_OBJECT (h264parse, "Looking for more"); } - *skipsize = sc_pos; - *framesize = next_sc_pos - sc_pos; +end: + /* FIXME this shouldnt be needed */ + if (h264parse->nalu.sc_offset > 0 && data[h264parse->nalu.sc_offset - 1] == 0) + h264parse->nalu.sc_offset--; + + *skipsize = h264parse->nalu.sc_offset; + *framesize = nalu.offset + nalu.size - h264parse->nalu.sc_offset; /* CHECKME */ + h264parse->current_off = current_off; return TRUE; +parsing_error: + GST_DEBUG_OBJECT (h264parse, "error parsing Nal Unit"); more: - /* Ask for 1024 bytes more - this is an arbitrary choice */ - gst_base_parse_set_min_frame_size (parse, GST_BUFFER_SIZE (buffer) + 1024); - /* skip up to initial startcode */ - *skipsize = sc_pos; - /* resume scanning here next time */ - h264parse->last_nal_pos = nal_pos; - h264parse->next_sc_pos = next_sc_pos; + /* ask 
for best next available */ + *framesize = G_MAXUINT; + if (!h264parse->nalu.size) { + /* skip up to initial startcode */ + *skipsize = h264parse->nalu.sc_offset; + } else { + *skipsize = 0; + } + + /* Restart parsing from here next time */ + h264parse->current_off = current_off; return FALSE; } @@ -569,8 +860,8 @@ gst_h264_parse_make_codec_data (GstH264Parse * h264parse) /* only nal payload in stored nals */ - for (i = 0; i < MAX_SPS_COUNT; i++) { - if ((nal = h264parse->params->sps_nals[i])) { + for (i = 0; i < GST_H264_MAX_SPS_COUNT; i++) { + if ((nal = h264parse->sps_nals[i])) { num_sps++; /* size bytes also count */ sps_size += GST_BUFFER_SIZE (nal) + 2; @@ -582,8 +873,8 @@ gst_h264_parse_make_codec_data (GstH264Parse * h264parse) } } } - for (i = 0; i < MAX_PPS_COUNT; i++) { - if ((nal = h264parse->params->pps_nals[i])) { + for (i = 0; i < GST_H264_MAX_PPS_COUNT; i++) { + if ((nal = h264parse->pps_nals[i])) { num_pps++; /* size bytes also count */ pps_size += GST_BUFFER_SIZE (nal) + 2; @@ -607,8 +898,8 @@ gst_h264_parse_make_codec_data (GstH264Parse * h264parse) data[5] = 0xe0 | num_sps; /* number of SPSs */ data += 6; - for (i = 0; i < MAX_SPS_COUNT; i++) { - if ((nal = h264parse->params->sps_nals[i])) { + for (i = 0; i < GST_H264_MAX_SPS_COUNT; i++) { + if ((nal = h264parse->sps_nals[i])) { GST_WRITE_UINT16_BE (data, GST_BUFFER_SIZE (nal)); memcpy (data + 2, GST_BUFFER_DATA (nal), GST_BUFFER_SIZE (nal)); data += 2 + GST_BUFFER_SIZE (nal); @@ -617,8 +908,8 @@ gst_h264_parse_make_codec_data (GstH264Parse * h264parse) data[0] = num_pps; data++; - for (i = 0; i < MAX_PPS_COUNT; i++) { - if ((nal = h264parse->params->pps_nals[i])) { + for (i = 0; i < GST_H264_MAX_PPS_COUNT; i++) { + if ((nal = h264parse->pps_nals[i])) { GST_WRITE_UINT16_BE (data, GST_BUFFER_SIZE (nal)); memcpy (data + 2, GST_BUFFER_DATA (nal), GST_BUFFER_SIZE (nal)); data += 2 + GST_BUFFER_SIZE (nal); @@ -629,10 +920,96 @@ gst_h264_parse_make_codec_data (GstH264Parse * h264parse) } static void 
-gst_h264_parse_update_src_caps (GstH264Parse * h264parse) +gst_h264_parse_get_par (GstH264Parse * h264parse, gint * num, gint * den) { - GstH264ParamsSPS *sps; - GstCaps *caps = NULL, *sink_caps; + gint par_n, par_d; + + par_n = par_d = 0; + switch (h264parse->aspect_ratio_idc) { + case 0: + par_n = par_d = 0; + break; + case 1: + par_n = 1; + par_d = 1; + break; + case 2: + par_n = 12; + par_d = 11; + break; + case 3: + par_n = 10; + par_d = 11; + break; + case 4: + par_n = 16; + par_d = 11; + break; + case 5: + par_n = 40; + par_d = 33; + break; + case 6: + par_n = 24; + par_d = 11; + break; + case 7: + par_n = 20; + par_d = 11; + break; + case 8: + par_n = 32; + par_d = 11; + break; + case 9: + par_n = 80; + par_d = 33; + break; + case 10: + par_n = 18; + par_d = 11; + break; + case 11: + par_n = 15; + par_d = 11; + break; + case 12: + par_n = 64; + par_d = 33; + break; + case 13: + par_n = 160; + par_d = 99; + break; + case 14: + par_n = 4; + par_d = 3; + break; + case 15: + par_n = 3; + par_d = 2; + break; + case 16: + par_n = 2; + par_d = 1; + break; + case 255: + par_n = h264parse->sar_width; + par_d = h264parse->sar_height; + break; + default: + par_n = par_d = 0; + } + + *num = par_n; + *den = par_d; +} + +static void +gst_h264_parse_update_src_caps (GstH264Parse * h264parse, GstCaps * caps) +{ + GstH264SPS *sps; + GstCaps *sink_caps; gboolean modified = FALSE; GstBuffer *buf = NULL; @@ -641,14 +1018,20 @@ gst_h264_parse_update_src_caps (GstH264Parse * h264parse) else if (G_UNLIKELY (!h264parse->update_caps)) return; + /* if this is being called from the first _setcaps call, caps on the sinkpad + * aren't set yet and so they need to be passed as an argument */ + if (caps) + sink_caps = caps; + else + sink_caps = GST_PAD_CAPS (GST_BASE_PARSE_SINK_PAD (h264parse)); + /* carry over input caps as much as possible; override with our own stuff */ - sink_caps = GST_PAD_CAPS (GST_BASE_PARSE_SINK_PAD (h264parse)); if (sink_caps) gst_caps_ref (sink_caps); else 
sink_caps = gst_caps_new_simple ("video/x-h264", NULL); - sps = h264parse->params->sps; + sps = h264parse->nalparser->last_sps; GST_DEBUG_OBJECT (h264parse, "sps: %p", sps); /* only codec-data for nice-and-clean au aligned packetized avc format */ @@ -667,35 +1050,83 @@ gst_h264_parse_update_src_caps (GstH264Parse * h264parse) } } + caps = NULL; if (G_UNLIKELY (!sps)) { caps = gst_caps_copy (sink_caps); - } else if (G_UNLIKELY (h264parse->width != sps->width || - h264parse->height != sps->height || h264parse->fps_num != sps->fps_num - || h264parse->fps_den != sps->fps_den || modified)) { - caps = gst_caps_copy (sink_caps); - /* sps should give this */ - gst_caps_set_simple (caps, "width", G_TYPE_INT, sps->width, - "height", G_TYPE_INT, sps->height, NULL); - h264parse->height = sps->height; - h264parse->width = sps->width; - /* but not necessarily or reliably this */ - if ((!h264parse->fps_num || !h264parse->fps_den) && - sps->fps_num > 0 && sps->fps_den > 0) { - gst_caps_set_simple (caps, "framerate", - GST_TYPE_FRACTION, sps->fps_num, sps->fps_den, NULL); - h264parse->fps_num = sps->fps_num; - h264parse->fps_den = sps->fps_den; - gst_base_parse_set_frame_rate (GST_BASE_PARSE (h264parse), - h264parse->fps_num, h264parse->fps_den, 0, 0); + } else { + if (G_UNLIKELY (h264parse->width != sps->width || + h264parse->height != sps->height)) { + GST_INFO_OBJECT (h264parse, "resolution changed %dx%d", + sps->width, sps->height); + h264parse->width = sps->width; + h264parse->height = sps->height; + modified = TRUE; + } + + /* 0/1 is set as the default in the codec parser */ + if (sps->vui_parameters.timing_info_present_flag && + !(sps->fps_num == 0 && sps->fps_den == 1)) { + if (G_UNLIKELY (h264parse->fps_num != sps->fps_num + || h264parse->fps_den != sps->fps_den)) { + GST_INFO_OBJECT (h264parse, "framerate changed %d/%d", + sps->fps_num, sps->fps_den); + h264parse->fps_num = sps->fps_num; + h264parse->fps_den = sps->fps_den; + gst_base_parse_set_frame_rate (GST_BASE_PARSE 
(h264parse), + h264parse->fps_num, h264parse->fps_den, 0, 0); + modified = TRUE; + } + } + + if (sps->vui_parameters.aspect_ratio_info_present_flag) { + if (G_UNLIKELY (h264parse->aspect_ratio_idc != + sps->vui_parameters.aspect_ratio_idc)) { + h264parse->aspect_ratio_idc = sps->vui_parameters.aspect_ratio_idc; + GST_INFO_OBJECT (h264parse, "aspect ratio idc changed %d", + h264parse->aspect_ratio_idc); + modified = TRUE; + } + + /* 255 means sar_width and sar_height present */ + if (G_UNLIKELY (sps->vui_parameters.aspect_ratio_idc == 255 && + (h264parse->sar_width != sps->vui_parameters.sar_width || + h264parse->sar_height != sps->vui_parameters.sar_height))) { + h264parse->sar_width = sps->vui_parameters.sar_width; + h264parse->sar_height = sps->vui_parameters.sar_height; + GST_INFO_OBJECT (h264parse, "aspect ratio SAR changed %d/%d", + h264parse->sar_width, h264parse->sar_height); + modified = TRUE; + } + } + + if (G_UNLIKELY (modified)) { + caps = gst_caps_copy (sink_caps); + /* sps should give this */ + gst_caps_set_simple (caps, "width", G_TYPE_INT, sps->width, + "height", G_TYPE_INT, sps->height, NULL); + /* but not necessarily or reliably this */ + if (h264parse->fps_num > 0 && h264parse->fps_den > 0) + gst_caps_set_simple (caps, "framerate", + GST_TYPE_FRACTION, h264parse->fps_num, h264parse->fps_den, NULL); } } if (caps) { + gint par_n, par_d; + gst_caps_set_simple (caps, "parsed", G_TYPE_BOOLEAN, TRUE, "stream-format", G_TYPE_STRING, gst_h264_parse_get_string (h264parse, TRUE, h264parse->format), "alignment", G_TYPE_STRING, gst_h264_parse_get_string (h264parse, FALSE, h264parse->align), NULL); + + gst_h264_parse_get_par (h264parse, &par_n, &par_d); + if (par_n != 0 && par_d != 0) { + GST_INFO_OBJECT (h264parse, "PAR %d/%d", par_n, par_d); + gst_caps_set_simple (caps, "pixel-aspect-ratio", GST_TYPE_FRACTION, + par_n, par_d, NULL); + } + if (buf) { gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, buf, NULL); gst_buffer_replace 
(&h264parse->codec_data, buf); @@ -716,6 +1147,134 @@ gst_h264_parse_update_src_caps (GstH264Parse * h264parse) gst_buffer_unref (buf); } +static void +gst_h264_parse_get_timestamp (GstH264Parse * h264parse, + GstClockTime * out_ts, GstClockTime * out_dur, gboolean frame) +{ + GstH264SPS *sps = h264parse->nalparser->last_sps; + GstClockTime upstream; + gint duration = 1; + + g_return_if_fail (out_dur != NULL); + g_return_if_fail (out_ts != NULL); + + upstream = *out_ts; + + if (!frame) { + GST_LOG_OBJECT (h264parse, "no frame data -> 0 duration"); + *out_dur = 0; + goto exit; + } else { + *out_ts = upstream; + } + + if (!sps) { + GST_DEBUG_OBJECT (h264parse, "referred SPS invalid"); + goto exit; + } else if (!sps->vui_parameters.timing_info_present_flag) { + GST_DEBUG_OBJECT (h264parse, + "unable to compute timestamp: timing info not present"); + goto exit; + } else if (sps->vui_parameters.time_scale == 0) { + GST_DEBUG_OBJECT (h264parse, + "unable to compute timestamp: time_scale = 0 " + "(this is forbidden in spec; bitstream probably contains error)"); + goto exit; + } + + if (h264parse->sei_pic_struct_pres_flag && + h264parse->sei_pic_struct != (guint8) - 1) { + /* Note that when h264parse->sei_pic_struct == -1 (unspecified), there + * are ways to infer its value. This is related to computing the + * TopFieldOrderCnt and BottomFieldOrderCnt, which looks + * complicated and thus not implemented for the time being. 
Yet + * the value we have here is correct for many applications + */ + switch (h264parse->sei_pic_struct) { + case GST_H264_SEI_PIC_STRUCT_TOP_FIELD: + case GST_H264_SEI_PIC_STRUCT_BOTTOM_FIELD: + duration = 1; + break; + case GST_H264_SEI_PIC_STRUCT_FRAME: + case GST_H264_SEI_PIC_STRUCT_TOP_BOTTOM: + case GST_H264_SEI_PIC_STRUCT_BOTTOM_TOP: + duration = 2; + break; + case GST_H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP: + case GST_H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: + duration = 3; + break; + case GST_H264_SEI_PIC_STRUCT_FRAME_DOUBLING: + duration = 4; + break; + case GST_H264_SEI_PIC_STRUCT_FRAME_TRIPLING: + duration = 6; + break; + default: + GST_DEBUG_OBJECT (h264parse, + "h264parse->sei_pic_struct of unknown value %d. Not parsed", + h264parse->sei_pic_struct); + break; + } + } else { + duration = h264parse->field_pic_flag ? 1 : 2; + } + + GST_LOG_OBJECT (h264parse, "frame tick duration %d", duration); + + /* + * h264parse.264 C.1.2 Timing of coded picture removal (equivalent to DTS): + * Tr,n(0) = initial_cpb_removal_delay[ SchedSelIdx ] / 90000 + * Tr,n(n) = Tr,n(nb) + Tc * cpb_removal_delay(n) + * where + * Tc = num_units_in_tick / time_scale + */ + + if (h264parse->ts_trn_nb != GST_CLOCK_TIME_NONE) { + GST_LOG_OBJECT (h264parse, "buffering based ts"); + /* buffering period is present */ + if (upstream != GST_CLOCK_TIME_NONE) { + /* If upstream timestamp is valid, we respect it and adjust current + * reference point */ + h264parse->ts_trn_nb = upstream - + (GstClockTime) gst_util_uint64_scale_int + (h264parse->sei_cpb_removal_delay * GST_SECOND, + sps->vui_parameters.num_units_in_tick, + sps->vui_parameters.time_scale); + } else { + /* If no upstream timestamp is given, we write in new timestamp */ + upstream = h264parse->dts = h264parse->ts_trn_nb + + (GstClockTime) gst_util_uint64_scale_int + (h264parse->sei_cpb_removal_delay * GST_SECOND, + sps->vui_parameters.num_units_in_tick, + sps->vui_parameters.time_scale); + } + } else { + GstClockTime dur; + + 
GST_LOG_OBJECT (h264parse, "duration based ts"); + /* naive method: no removal delay specified + * track upstream timestamp and provide best guess frame duration */ + dur = gst_util_uint64_scale_int (duration * GST_SECOND, + sps->vui_parameters.num_units_in_tick, sps->vui_parameters.time_scale); + /* sanity check */ + if (dur < GST_MSECOND) { + GST_DEBUG_OBJECT (h264parse, "discarding dur %" GST_TIME_FORMAT, + GST_TIME_ARGS (dur)); + } else { + *out_dur = dur; + } + } + +exit: + if (GST_CLOCK_TIME_IS_VALID (upstream)) + *out_ts = h264parse->dts = upstream; + + if (GST_CLOCK_TIME_IS_VALID (*out_dur) && + GST_CLOCK_TIME_IS_VALID (h264parse->dts)) + h264parse->dts += *out_dur; +} + static GstFlowReturn gst_h264_parse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame) { @@ -726,17 +1285,23 @@ gst_h264_parse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame) h264parse = GST_H264_PARSE (parse); buffer = frame->buffer; - gst_h264_parse_update_src_caps (h264parse); + gst_h264_parse_update_src_caps (h264parse, NULL); - gst_h264_params_get_timestamp (h264parse->params, - &GST_BUFFER_TIMESTAMP (buffer), &GST_BUFFER_DURATION (buffer), - h264parse->frame_start); + /* don't mess with timestamps if provided by upstream, + * particularly since our ts not that good they handle seeking etc */ + if (h264parse->do_ts) + gst_h264_parse_get_timestamp (h264parse, + &GST_BUFFER_TIMESTAMP (buffer), &GST_BUFFER_DURATION (buffer), + h264parse->frame_start); if (h264parse->keyframe) GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); else GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); + if (h264parse->b_frame) + GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_B_FRAME); + /* replace with transformed AVC output if applicable */ av = gst_adapter_available (h264parse->frame_out); if (av) { @@ -745,6 +1310,7 @@ gst_h264_parse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame) buf = gst_adapter_take_buffer (h264parse->frame_out, av); 
gst_buffer_copy_metadata (buf, buffer, GST_BUFFER_COPY_ALL); gst_buffer_replace (&frame->buffer, buf); + gst_buffer_unref (buf); } return GST_FLOW_OK; @@ -815,16 +1381,16 @@ gst_h264_parse_pre_push_frame (GstBaseParse * parse, GstBaseParseFrame * frame) if (h264parse->align == GST_H264_PARSE_ALIGN_NAL) { /* send separate config NAL buffers */ GST_DEBUG_OBJECT (h264parse, "- sending SPS/PPS"); - for (i = 0; i < MAX_SPS_COUNT; i++) { - if ((codec_nal = h264parse->params->sps_nals[i])) { + for (i = 0; i < GST_H264_MAX_SPS_COUNT; i++) { + if ((codec_nal = h264parse->sps_nals[i])) { GST_DEBUG_OBJECT (h264parse, "sending SPS nal"); gst_h264_parse_push_codec_buffer (h264parse, codec_nal, timestamp); h264parse->last_report = new_ts; } } - for (i = 0; i < MAX_PPS_COUNT; i++) { - if ((codec_nal = h264parse->params->pps_nals[i])) { + for (i = 0; i < GST_H264_MAX_PPS_COUNT; i++) { + if ((codec_nal = h264parse->pps_nals[i])) { GST_DEBUG_OBJECT (h264parse, "sending PPS nal"); gst_h264_parse_push_codec_buffer (h264parse, codec_nal, timestamp); @@ -841,8 +1407,8 @@ gst_h264_parse_pre_push_frame (GstBaseParse * parse, GstBaseParseFrame * frame) gst_byte_writer_put_data (&bw, GST_BUFFER_DATA (buffer), h264parse->idr_pos); GST_DEBUG_OBJECT (h264parse, "- inserting SPS/PPS"); - for (i = 0; i < MAX_SPS_COUNT; i++) { - if ((codec_nal = h264parse->params->sps_nals[i])) { + for (i = 0; i < GST_H264_MAX_SPS_COUNT; i++) { + if ((codec_nal = h264parse->sps_nals[i])) { GST_DEBUG_OBJECT (h264parse, "inserting SPS nal"); gst_byte_writer_put_uint32_be (&bw, bs ? 
1 : GST_BUFFER_SIZE (codec_nal)); @@ -851,8 +1417,8 @@ gst_h264_parse_pre_push_frame (GstBaseParse * parse, GstBaseParseFrame * frame) h264parse->last_report = new_ts; } } - for (i = 0; i < MAX_PPS_COUNT; i++) { - if ((codec_nal = h264parse->params->pps_nals[i])) { + for (i = 0; i < GST_H264_MAX_PPS_COUNT; i++) { + if ((codec_nal = h264parse->pps_nals[i])) { GST_DEBUG_OBJECT (h264parse, "inserting PPS nal"); gst_byte_writer_put_uint32_be (&bw, bs ? 1 : GST_BUFFER_SIZE (codec_nal)); @@ -867,11 +1433,17 @@ gst_h264_parse_pre_push_frame (GstBaseParse * parse, GstBaseParseFrame * frame) /* collect result and push */ new_buf = gst_byte_writer_reset_and_get_buffer (&bw); gst_buffer_copy_metadata (new_buf, buffer, GST_BUFFER_COPY_ALL); + /* should already be keyframe/IDR, but it may not have been, + * so mark it as such to avoid being discarded by picky decoder */ + GST_BUFFER_FLAG_UNSET (new_buf, GST_BUFFER_FLAG_DELTA_UNIT); gst_buffer_replace (&frame->buffer, new_buf); + gst_buffer_unref (new_buf); } } /* we pushed whatever we had */ h264parse->push_codec = FALSE; + h264parse->have_sps = FALSE; + h264parse->have_pps = FALSE; } } @@ -886,8 +1458,10 @@ gst_h264_parse_set_caps (GstBaseParse * parse, GstCaps * caps) GstH264Parse *h264parse; GstStructure *str; const GValue *value; - GstBuffer *buffer = NULL; - guint size; + GstBuffer *codec_data = NULL; + guint size, format, align, off; + GstH264NalUnit nalu; + GstH264ParserResult parseres; h264parse = GST_H264_PARSE (parse); @@ -901,26 +1475,33 @@ gst_h264_parse_set_caps (GstBaseParse * parse, GstCaps * caps) gst_structure_get_int (str, "height", &h264parse->height); gst_structure_get_fraction (str, "framerate", &h264parse->fps_num, &h264parse->fps_den); + gst_structure_get_fraction (str, "pixel-aspect-ratio", &h264parse->sar_width, + &h264parse->sar_height); + + /* get upstream format and align from caps */ + gst_h264_parse_format_from_caps (caps, &format, &align); /* packetized video has a codec_data */ - if ((value = 
gst_structure_get_value (str, "codec_data"))) { + if (format == GST_H264_PARSE_FORMAT_AVC && + (value = gst_structure_get_value (str, "codec_data"))) { guint8 *data; - guint num_sps, num_pps, profile, len; + guint num_sps, num_pps, profile; gint i; GST_DEBUG_OBJECT (h264parse, "have packetized h264"); /* make note for optional split processing */ h264parse->packetized = TRUE; - buffer = gst_value_get_buffer (value); - if (!buffer) + codec_data = gst_value_get_buffer (value); + if (!codec_data) goto wrong_type; - data = GST_BUFFER_DATA (buffer); - size = GST_BUFFER_SIZE (buffer); + data = GST_BUFFER_DATA (codec_data); + size = GST_BUFFER_SIZE (codec_data); /* parse the avcC data */ - if (size < 7) + if (size < 8) goto avcc_too_small; + /* parse the version, this must be 1 */ if (data[0] != 1) goto wrong_version; @@ -935,31 +1516,42 @@ gst_h264_parse_set_caps (GstBaseParse * parse, GstCaps * caps) /* this is the number of bytes in front of the NAL units to mark their * length */ h264parse->nal_length_size = (data[4] & 0x03) + 1; - GST_DEBUG_OBJECT (h264parse, "nal length %u", h264parse->nal_length_size); + GST_DEBUG_OBJECT (h264parse, "nal length size %u", + h264parse->nal_length_size); num_sps = data[5] & 0x1f; - data += 6; - size -= 6; + off = 6; for (i = 0; i < num_sps; i++) { - len = GST_READ_UINT16_BE (data); - if (size < len + 2 || len < 2) + parseres = gst_h264_parser_identify_nalu_avc (h264parse->nalparser, + data, off, size, 2, &nalu); + if (parseres != GST_H264_PARSER_OK) goto avcc_too_small; - /* digest for later reference */ - gst_h264_parse_process_nal (h264parse, data, 0, 2, len); - data += len + 2; - size -= len + 2; + + gst_h264_parse_process_nal (h264parse, &nalu); + off = nalu.offset + nalu.size; } - num_pps = data[0]; - data++; - size++; + + num_pps = data[off]; + off++; + for (i = 0; i < num_pps; i++) { - len = GST_READ_UINT16_BE (data); - if (size < len + 2 || len < 2) + parseres = gst_h264_parser_identify_nalu_avc (h264parse->nalparser, + data, 
off, size, 2, &nalu); + if (parseres != GST_H264_PARSER_OK) { goto avcc_too_small; - /* digest for later reference */ - gst_h264_parse_process_nal (h264parse, data, 0, 2, len); - data += len + 2; - size -= len + 2; + } + + gst_h264_parse_process_nal (h264parse, &nalu); + off = nalu.offset + nalu.size; + } + + h264parse->codec_data = gst_buffer_ref (codec_data); + + /* if upstream sets codec_data without setting stream-format and alignment, we + * assume stream-format=avc,alignment=au */ + if (format == GST_H264_PARSE_FORMAT_NONE) { + format = GST_H264_PARSE_FORMAT_AVC; + align = GST_H264_PARSE_ALIGN_AU; } } else { GST_DEBUG_OBJECT (h264parse, "have bytestream h264"); @@ -967,40 +1559,50 @@ gst_h264_parse_set_caps (GstBaseParse * parse, GstCaps * caps) h264parse->packetized = FALSE; /* we have 4 sync bytes */ h264parse->nal_length_size = 4; - } - if (h264parse->packetized) { - if (h264parse->split_packetized) { - GST_DEBUG_OBJECT (h264parse, - "converting AVC to nal bytestream prior to parsing"); - /* negotiate behaviour with upstream */ - gst_h264_parse_negotiate (h264parse); - if (h264parse->format == GST_H264_PARSE_FORMAT_BYTE) { - /* arrange to insert codec-data in-stream if needed */ - h264parse->push_codec = h264parse->packetized; - } - gst_base_parse_set_passthrough (parse, FALSE); - } else { - GST_DEBUG_OBJECT (h264parse, "passing on packetized AVC"); - /* no choice to negotiate */ - h264parse->format = GST_H264_PARSE_FORMAT_AVC; - h264parse->align = GST_H264_PARSE_ALIGN_AU; - /* fallback codec-data */ - h264parse->codec_data = gst_buffer_ref (buffer); - /* pass through unharmed, though _chain will parse a bit */ - gst_base_parse_set_passthrough (parse, TRUE); - /* we did parse codec-data and might supplement src caps */ - gst_h264_parse_update_src_caps (h264parse); + if (format == GST_H264_PARSE_FORMAT_NONE) { + format = GST_H264_PARSE_FORMAT_BYTE; + align = GST_H264_PARSE_ALIGN_AU; } } - /* src caps are only arranged for later on */ + { + GstCaps *in_caps; 
+ + /* prefer input type determined above */ + in_caps = gst_caps_new_simple ("video/x-h264", + "parsed", G_TYPE_BOOLEAN, TRUE, + "stream-format", G_TYPE_STRING, + gst_h264_parse_get_string (h264parse, TRUE, format), + "alignment", G_TYPE_STRING, + gst_h264_parse_get_string (h264parse, FALSE, align), NULL); + /* negotiate with downstream, sets ->format and ->align */ + gst_h264_parse_negotiate (h264parse, in_caps); + gst_caps_unref (in_caps); + } + + if (format == h264parse->format && align == h264parse->align) { + gst_base_parse_set_passthrough (parse, TRUE); + + /* we did parse codec-data and might supplement src caps */ + gst_h264_parse_update_src_caps (h264parse, caps); + } else if (format == GST_H264_PARSE_FORMAT_AVC) { + /* if input != output, and input is avc, must split before anything else */ + /* arrange to insert codec-data in-stream if needed. + * src caps are only arranged for later on */ + h264parse->push_codec = TRUE; + h264parse->have_sps = FALSE; + h264parse->have_pps = FALSE; + h264parse->split_packetized = TRUE; + h264parse->packetized = TRUE; + } + return TRUE; /* ERRORS */ avcc_too_small: { - GST_DEBUG_OBJECT (h264parse, "avcC size %u < 7", size); + GST_DEBUG_OBJECT (h264parse, "avcC size %u < 8", size); goto refuse_caps; } wrong_version: @@ -1020,53 +1622,84 @@ refuse_caps: } } +static gboolean +gst_h264_parse_event (GstBaseParse * parse, GstEvent * event) +{ + gboolean handled = FALSE; + GstH264Parse *h264parse = GST_H264_PARSE (parse); + + switch (GST_EVENT_TYPE (event)) { + case GST_EVENT_FLUSH_STOP: + h264parse->dts = GST_CLOCK_TIME_NONE; + h264parse->ts_trn_nb = GST_CLOCK_TIME_NONE; + break; + case GST_EVENT_NEWSEGMENT: + { + gdouble rate, applied_rate; + GstFormat format; + gint64 start; + + gst_event_parse_new_segment_full (event, NULL, &rate, &applied_rate, + &format, &start, NULL, NULL); + /* don't try to mess with more subtle cases (e.g. 
seek) */ + if (format == GST_FORMAT_TIME && + (start != 0 || rate != 1.0 || applied_rate != 1.0)) + h264parse->do_ts = FALSE; + break; + } + default: + break; + } + + return handled; +} + static GstFlowReturn gst_h264_parse_chain (GstPad * pad, GstBuffer * buffer) { GstH264Parse *h264parse = GST_H264_PARSE (GST_PAD_PARENT (pad)); if (h264parse->packetized && buffer) { - GstByteReader br; GstBuffer *sub; GstFlowReturn ret = GST_FLOW_OK; - guint32 len; + GstH264ParserResult parse_res; + GstH264NalUnit nalu; const guint nl = h264parse->nal_length_size; + if (nl < 1 || nl > 4) { + GST_DEBUG_OBJECT (h264parse, "insufficient data to split input"); + gst_buffer_unref (buffer); + + return GST_FLOW_NOT_NEGOTIATED; + } + GST_LOG_OBJECT (h264parse, "processing packet buffer of size %d", GST_BUFFER_SIZE (buffer)); - gst_byte_reader_init_from_buffer (&br, buffer); - while (ret == GST_FLOW_OK && gst_byte_reader_get_remaining (&br)) { + + parse_res = gst_h264_parser_identify_nalu_avc (h264parse->nalparser, + GST_BUFFER_DATA (buffer), 0, GST_BUFFER_SIZE (buffer), nl, &nalu); + + while (parse_res == GST_H264_PARSER_OK) { GST_DEBUG_OBJECT (h264parse, "AVC nal offset %d", - gst_byte_reader_get_pos (&br)); - if (gst_byte_reader_get_remaining (&br) < nl) - goto parse_failed; - switch (nl) { - case 4: - len = gst_byte_reader_get_uint32_be_unchecked (&br); - break; - case 3: - len = gst_byte_reader_get_uint24_be_unchecked (&br); - break; - case 2: - len = gst_byte_reader_get_uint16_be_unchecked (&br); - break; - case 1: - len = gst_byte_reader_get_uint8_unchecked (&br); - break; - default: - goto not_negotiated; - break; - } - GST_DEBUG_OBJECT (h264parse, "AVC nal size %d", len); - if (gst_byte_reader_get_remaining (&br) < len) - goto parse_failed; + nalu.offset + nalu.size); + if (h264parse->split_packetized) { /* convert to NAL aligned byte stream input */ sub = gst_h264_parse_wrap_nal (h264parse, GST_H264_PARSE_FORMAT_BYTE, - (guint8 *) gst_byte_reader_get_data_unchecked (&br, len), 
len); + nalu.data + nalu.offset, nalu.size); /* at least this should make sense */ GST_BUFFER_TIMESTAMP (sub) = GST_BUFFER_TIMESTAMP (buffer); - GST_LOG_OBJECT (h264parse, "pushing NAL of size %d", len); + /* transfer flags (e.g. DISCONT) for first fragment */ + if (nalu.offset <= nl) + gst_buffer_copy_metadata (sub, buffer, GST_BUFFER_COPY_FLAGS); + /* in reverse playback, baseparse gathers buffers, so we cannot + * guarantee a buffer to contain a single whole NALU */ + h264parse->packetized_chunked = + (GST_BASE_PARSE (h264parse)->segment.rate > 0.0); + h264parse->packetized_last = + (nalu.offset + nalu.size + nl >= GST_BUFFER_SIZE (buffer)); + GST_LOG_OBJECT (h264parse, "pushing NAL of size %d, last = %d", + nalu.size, h264parse->packetized_last); ret = h264parse->parse_chain (pad, sub); } else { /* pass-through: no looking for frames (and nal processing), @@ -1074,41 +1707,41 @@ gst_h264_parse_chain (GstPad * pad, GstBuffer * buffer) /* NOTE: so if it is really configured to do so, * pre_push can/will still insert codec-data at intervals, * which is not really pure pass-through, but anyway ... 
*/ - gst_h264_parse_process_nal (h264parse, - GST_BUFFER_DATA (buffer), gst_byte_reader_get_pos (&br) - nl, - gst_byte_reader_get_pos (&br), len); - gst_byte_reader_skip_unchecked (&br, len); + gst_h264_parse_process_nal (h264parse, &nalu); + } + + parse_res = gst_h264_parser_identify_nalu_avc (h264parse->nalparser, + GST_BUFFER_DATA (buffer), nalu.offset + nalu.size, + GST_BUFFER_SIZE (buffer), nl, &nalu); } - if (h264parse->split_packetized) + + if (h264parse->split_packetized) { + gst_buffer_unref (buffer); return ret; - } + } else { + /* nal processing in pass-through might have collected stuff; + * ensure nothing happens with this later on */ + gst_adapter_clear (h264parse->frame_out); + } -exit: - /* nal processing in pass-through might have collected stuff; - * ensure nothing happens with this later on */ - gst_adapter_clear (h264parse->frame_out); + if (parse_res == GST_H264_PARSER_NO_NAL_END || + parse_res == GST_H264_PARSER_BROKEN_DATA) { - return h264parse->parse_chain (pad, buffer); + if (h264parse->split_packetized) { + GST_ELEMENT_ERROR (h264parse, STREAM, FAILED, (NULL), + ("invalid AVC input data")); + gst_buffer_unref (buffer); - /* ERRORS */ -not_negotiated: - { - GST_DEBUG_OBJECT (h264parse, "insufficient data to split input"); - return GST_FLOW_NOT_NEGOTIATED; - } -parse_failed: - { - if (h264parse->split_packetized) { - GST_ELEMENT_ERROR (h264parse, STREAM, FAILED, (NULL), - ("invalid AVC input data")); - return GST_FLOW_ERROR; - } else { - /* do not meddle to much in this case */ - GST_DEBUG_OBJECT (h264parse, "parsing packet failed"); - goto exit; + return GST_FLOW_ERROR; + } else { + /* do not meddle to much in this case */ + GST_DEBUG_OBJECT (h264parse, "parsing packet failed"); + } } } + + return h264parse->parse_chain (pad, buffer); } static void @@ -1120,9 +1753,6 @@ gst_h264_parse_set_property (GObject * object, guint prop_id, parse = GST_H264_PARSE (object); switch (prop_id) { - case PROP_SPLIT_PACKETIZED: - parse->split_packetized = 
g_value_get_boolean (value); - break; case PROP_CONFIG_INTERVAL: parse->interval = g_value_get_uint (value); break; @@ -1141,9 +1771,6 @@ gst_h264_parse_get_property (GObject * object, guint prop_id, GValue * value, parse = GST_H264_PARSE (object); switch (prop_id) { - case PROP_SPLIT_PACKETIZED: - g_value_set_boolean (value, parse->split_packetized); - break; case PROP_CONFIG_INTERVAL: g_value_set_uint (value, parse->interval); break; diff --git a/gst/videoparsers/gsth264parse.h b/gst/videoparsers/gsth264parse.h index 1aa1323..1304b64 100644 --- a/gst/videoparsers/gsth264parse.h +++ b/gst/videoparsers/gsth264parse.h @@ -1,7 +1,10 @@ /* GStreamer H.264 Parser - * Copyright (C) <2010> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> - * Copyright (C) <2010> Collabora Multimedia + * Copyright (C) <2010> Collabora ltd * Copyright (C) <2010> Nokia Corporation + * Copyright (C) <2011> Intel Corporation + * + * Copyright (C) <2010> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> + * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public @@ -24,8 +27,7 @@ #include <gst/gst.h> #include <gst/base/gstbaseparse.h> - -#include "h264parse.h" +#include <gst/codecparsers/gsth264parser.h> G_BEGIN_DECLS @@ -56,28 +58,54 @@ struct _GstH264Parse /* stream */ gint width, height; gint fps_num, fps_den; + gint aspect_ratio_idc; + gint sar_width, sar_height; GstBuffer *codec_data; guint nal_length_size; gboolean packetized; /* state */ - GstH264Params *params; + GstH264NalParser *nalparser; + GstH264NalUnit nalu; guint align; guint format; + guint current_off; + gboolean packetized_last; + gboolean packetized_chunked; GstClockTime last_report; gboolean push_codec; + gboolean have_sps; + gboolean have_pps; + + /* collected SPS and PPS NALUs */ + GstBuffer *sps_nals[GST_H264_MAX_SPS_COUNT]; + GstBuffer *pps_nals[GST_H264_MAX_PPS_COUNT]; + + 
/* Infos we need to keep track of */ + guint32 sei_cpb_removal_delay; + guint8 sei_pic_struct; + guint8 sei_pic_struct_pres_flag; + guint field_pic_flag; + + /* cached timestamps */ + /* (trying to) track upstream dts and interpolate */ + GstClockTime dts; + /* dts at start of last buffering period */ + GstClockTime ts_trn_nb; + gboolean do_ts; /* frame parsing */ - guint last_nal_pos; - guint next_sc_pos; - gint idr_pos; + /*guint last_nal_pos;*/ + /*guint next_sc_pos;*/ + gint idr_pos, sei_pos; gboolean update_caps; GstAdapter *frame_out; gboolean keyframe; gboolean frame_start; /* AU state */ gboolean picture_start; + gboolean b_frame; /* props */ gboolean split_packetized; diff --git a/gst/videoparsers/gstmpegvideoparse.c b/gst/videoparsers/gstmpegvideoparse.c new file mode 100644 index 0000000..2c2d560 --- /dev/null +++ b/gst/videoparsers/gstmpegvideoparse.c @@ -0,0 +1,663 @@ +/* GStreamer + * Copyright (C) <2007> Jan Schmidt <thaytan@mad.scientist.com> + * Copyright (C) <2011> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> + * Copyright (C) <2011> Collabora Multimedia + * Copyright (C) <2011> Nokia Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. 
+ */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <string.h> +#include <gst/base/gstbytereader.h> + +#include "gstmpegvideoparse.h" + +GST_DEBUG_CATEGORY (mpegv_parse_debug); +#define GST_CAT_DEFAULT mpegv_parse_debug + +static GstStaticPadTemplate src_template = +GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, + GST_PAD_ALWAYS, + GST_STATIC_CAPS ("video/mpeg, " + "mpegversion = (int) [1, 2], " + "parsed = (boolean) true, " "systemstream = (boolean) false") + ); + +static GstStaticPadTemplate sink_template = +GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, + GST_PAD_ALWAYS, + GST_STATIC_CAPS ("video/mpeg, " + "mpegversion = (int) [1, 2], " + "parsed = (boolean) false, " "systemstream = (boolean) false") + ); + +/* Properties */ +#define DEFAULT_PROP_DROP TRUE +#define DEFAULT_PROP_GOP_SPLIT FALSE + +enum +{ + PROP_0, + PROP_DROP, + PROP_GOP_SPLIT, + PROP_LAST +}; + +GST_BOILERPLATE (GstMpegvParse, gst_mpegv_parse, GstBaseParse, + GST_TYPE_BASE_PARSE); + +static gboolean gst_mpegv_parse_start (GstBaseParse * parse); +static gboolean gst_mpegv_parse_stop (GstBaseParse * parse); +static gboolean gst_mpegv_parse_check_valid_frame (GstBaseParse * parse, + GstBaseParseFrame * frame, guint * framesize, gint * skipsize); +static GstFlowReturn gst_mpegv_parse_parse_frame (GstBaseParse * parse, + GstBaseParseFrame * frame); +static gboolean gst_mpegv_parse_set_caps (GstBaseParse * parse, GstCaps * caps); + +static void gst_mpegv_parse_set_property (GObject * object, guint prop_id, + const GValue * value, GParamSpec * pspec); +static void gst_mpegv_parse_get_property (GObject * object, guint prop_id, + GValue * value, GParamSpec * pspec); + +static void +gst_mpegv_parse_base_init (gpointer klass) +{ + GstElementClass *element_class = GST_ELEMENT_CLASS (klass); + + gst_element_class_add_pad_template (element_class, + gst_static_pad_template_get (&src_template)); + gst_element_class_add_pad_template (element_class, + gst_static_pad_template_get 
(&sink_template)); + + gst_element_class_set_details_simple (element_class, + "MPEG video elementary stream parser", + "Codec/Parser/Video", + "Parses and frames MPEG-1 and MPEG-2 elementary video streams", + "Wim Taymans <wim.taymans@ccollabora.co.uk>, " + "Jan Schmidt <thaytan@mad.scientist.com>, " + "Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>"); + + GST_DEBUG_CATEGORY_INIT (mpegv_parse_debug, "mpegvideoparse", 0, + "MPEG-1/2 video parser"); +} + +static void +gst_mpegv_parse_set_property (GObject * object, guint property_id, + const GValue * value, GParamSpec * pspec) +{ + GstMpegvParse *parse = GST_MPEGVIDEO_PARSE (object); + + switch (property_id) { + case PROP_DROP: + parse->drop = g_value_get_boolean (value); + break; + case PROP_GOP_SPLIT: + parse->gop_split = g_value_get_boolean (value); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); + } +} + +static void +gst_mpegv_parse_get_property (GObject * object, guint property_id, + GValue * value, GParamSpec * pspec) +{ + GstMpegvParse *parse = GST_MPEGVIDEO_PARSE (object); + + switch (property_id) { + case PROP_DROP: + g_value_set_boolean (value, parse->drop); + break; + case PROP_GOP_SPLIT: + g_value_set_boolean (value, parse->gop_split); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); + } +} + +static void +gst_mpegv_parse_class_init (GstMpegvParseClass * klass) +{ + GObjectClass *gobject_class = G_OBJECT_CLASS (klass); + GstBaseParseClass *parse_class = GST_BASE_PARSE_CLASS (klass); + + parent_class = g_type_class_peek_parent (klass); + + gobject_class->set_property = gst_mpegv_parse_set_property; + gobject_class->get_property = gst_mpegv_parse_get_property; + + g_object_class_install_property (gobject_class, PROP_DROP, + g_param_spec_boolean ("drop", "drop", + "Drop data untill valid configuration data is received either " + "in the stream or through caps", DEFAULT_PROP_DROP, + G_PARAM_CONSTRUCT | G_PARAM_READWRITE | 
G_PARAM_STATIC_STRINGS)); + + g_object_class_install_property (gobject_class, PROP_GOP_SPLIT, + g_param_spec_boolean ("gop-split", "gop-split", + "Split frame when encountering GOP", DEFAULT_PROP_GOP_SPLIT, + G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + /* Override BaseParse vfuncs */ + parse_class->start = GST_DEBUG_FUNCPTR (gst_mpegv_parse_start); + parse_class->stop = GST_DEBUG_FUNCPTR (gst_mpegv_parse_stop); + parse_class->check_valid_frame = + GST_DEBUG_FUNCPTR (gst_mpegv_parse_check_valid_frame); + parse_class->parse_frame = GST_DEBUG_FUNCPTR (gst_mpegv_parse_parse_frame); + parse_class->set_sink_caps = GST_DEBUG_FUNCPTR (gst_mpegv_parse_set_caps); +} + +static void +gst_mpegv_parse_init (GstMpegvParse * parse, GstMpegvParseClass * g_class) +{ +} + +static void +gst_mpegv_parse_reset_frame (GstMpegvParse * mpvparse) +{ + /* done parsing; reset state */ + mpvparse->last_sc = -1; + mpvparse->seq_offset = -1; + mpvparse->pic_offset = -1; +} + +static void +gst_mpegv_parse_reset (GstMpegvParse * mpvparse) +{ + gst_mpegv_parse_reset_frame (mpvparse); + mpvparse->profile = 0; + mpvparse->update_caps = TRUE; + + gst_buffer_replace (&mpvparse->config, NULL); + memset (&mpvparse->params, 0, sizeof (mpvparse->params)); +} + +static gboolean +gst_mpegv_parse_start (GstBaseParse * parse) +{ + GstMpegvParse *mpvparse = GST_MPEGVIDEO_PARSE (parse); + + GST_DEBUG_OBJECT (parse, "start"); + + gst_mpegv_parse_reset (mpvparse); + /* at least this much for a valid frame */ + gst_base_parse_set_min_frame_size (parse, 6); + + return TRUE; +} + +static gboolean +gst_mpegv_parse_stop (GstBaseParse * parse) +{ + GstMpegvParse *mpvparse = GST_MPEGVIDEO_PARSE (parse); + + GST_DEBUG_OBJECT (parse, "stop"); + + gst_mpegv_parse_reset (mpvparse); + + return TRUE; +} + +static gboolean +gst_mpegv_parse_process_config (GstMpegvParse * mpvparse, const guint8 * data, + gsize size) +{ + /* only do stuff if something new */ + if (mpvparse->config && size == 
GST_BUFFER_SIZE (mpvparse->config) && + memcmp (GST_BUFFER_DATA (mpvparse->config), data, size) == 0) + return TRUE; + + if (!gst_mpeg_video_params_parse_config (&mpvparse->params, data, size)) { + GST_DEBUG_OBJECT (mpvparse, "failed to parse config data (size %" + G_GSSIZE_FORMAT ")", size); + return FALSE; + } + + GST_LOG_OBJECT (mpvparse, "accepting parsed config size %" G_GSSIZE_FORMAT, + size); + + /* parsing ok, so accept it as new config */ + if (mpvparse->config != NULL) + gst_buffer_unref (mpvparse->config); + + mpvparse->config = gst_buffer_new_and_alloc (size); + memcpy (GST_BUFFER_DATA (mpvparse->config), data, size); + + /* trigger src caps update */ + mpvparse->update_caps = TRUE; + + return TRUE; +} + +#ifndef GST_DISABLE_GST_DEBUG +static const gchar * +picture_start_code_name (guint8 psc) +{ + guint i; + const struct + { + guint8 psc; + const gchar *name; + } psc_names[] = { + { + 0x00, "Picture Start"}, { + 0xb0, "Reserved"}, { + 0xb1, "Reserved"}, { + 0xb2, "User Data Start"}, { + 0xb3, "Sequence Header Start"}, { + 0xb4, "Sequence Error"}, { + 0xb5, "Extension Start"}, { + 0xb6, "Reserved"}, { + 0xb7, "Sequence End"}, { + 0xb8, "Group Start"}, { + 0xb9, "Program End"} + }; + if (psc < 0xB0 && psc > 0) + return "Slice Start"; + + for (i = 0; i < G_N_ELEMENTS (psc_names); i++) + if (psc_names[i].psc == psc) + return psc_names[i].name; + + return "UNKNOWN"; +}; + +static const gchar * +picture_type_name (guint8 pct) +{ + guint i; + const struct + { + guint8 pct; + const gchar *name; + } pct_names[] = { + { + 0, "Forbidden"}, { + 1, "I Frame"}, { + 2, "P Frame"}, { + 3, "B Frame"}, { + 4, "DC Intra Coded (Shall Not Be Used!)"} + }; + + for (i = 0; i < G_N_ELEMENTS (pct_names); i++) + if (pct_names[i].pct == pct) + return pct_names[i].name; + + return "Reserved/Unknown"; +} +#endif /* GST_DISABLE_GST_DEBUG */ + +/* caller guarantees at least start code in @buf at @off */ +/* for off == 0 initial code; returns TRUE if code starts a frame, + * 
otherwise returns TRUE if code terminates preceding frame */ +static gboolean +gst_mpegv_parse_process_sc (GstMpegvParse * mpvparse, GstBuffer * buf, gint off) +{ + gboolean ret = FALSE, do_seq = TRUE; + guint8 *data; + guint code; + + g_return_val_if_fail (buf && GST_BUFFER_SIZE (buf) >= 4, FALSE); + + data = GST_BUFFER_DATA (buf); + code = data[off + 3]; + + GST_LOG_OBJECT (mpvparse, "process startcode %x (%s)", code, + picture_start_code_name (code)); + + switch (code) { + case MPEG_PACKET_PICTURE: + GST_LOG_OBJECT (mpvparse, "startcode is PICTURE"); + /* picture is aggregated with preceding sequence/gop, if any. + * so, picture start code only ends if already a previous one */ + if (mpvparse->pic_offset < 0) + mpvparse->pic_offset = off; + else + ret = TRUE; + if (!off) + ret = TRUE; + break; + case MPEG_PACKET_SEQUENCE: + GST_LOG_OBJECT (mpvparse, "startcode is SEQUENCE"); + if (off == 0) + mpvparse->seq_offset = off; + ret = TRUE; + break; + case MPEG_PACKET_GOP: + GST_LOG_OBJECT (mpvparse, "startcode is GOP"); + if (mpvparse->seq_offset >= 0) + ret = mpvparse->gop_split; + else + ret = TRUE; + break; + default: + do_seq = FALSE; + break; + } + + /* process config data */ + if (G_UNLIKELY (mpvparse->seq_offset >= 0 && off && do_seq)) { + g_assert (mpvparse->seq_offset == 0); + gst_mpegv_parse_process_config (mpvparse, GST_BUFFER_DATA (buf), off); + /* avoid accepting again for a PICTURE sc following a GOP sc */ + mpvparse->seq_offset = -1; + } + + /* extract some picture info if there is any in the frame being terminated */ + if (G_UNLIKELY (ret && off)) { + if (G_LIKELY (mpvparse->pic_offset >= 0 && mpvparse->pic_offset < off)) { + if (G_LIKELY (GST_BUFFER_SIZE (buf) >= mpvparse->pic_offset + 6)) { + gint pct = (data[mpvparse->pic_offset + 5] >> 3) & 0x7; + + GST_LOG_OBJECT (mpvparse, "picture_coding_type %d (%s)", pct, + picture_type_name (pct)); + mpvparse->intra_frame = (pct == MPEG_PICTURE_TYPE_I); + } else { + GST_WARNING_OBJECT (mpvparse, "no data 
following PICTURE startcode"); + mpvparse->intra_frame = FALSE; + } + } else { + /* frame without picture must be some config, consider as keyframe */ + mpvparse->intra_frame = TRUE; + } + GST_LOG_OBJECT (mpvparse, "ending frame of size %d, is intra %d", off, + mpvparse->intra_frame); + } + + return ret; +} + +/* FIXME move into baseparse, or anything equivalent; + * see https://bugzilla.gnome.org/show_bug.cgi?id=650093 */ +#define GST_BASE_PARSE_FRAME_FLAG_PARSING 0x10000 + +static gboolean +gst_mpegv_parse_check_valid_frame (GstBaseParse * parse, + GstBaseParseFrame * frame, guint * framesize, gint * skipsize) +{ + GstMpegvParse *mpvparse = GST_MPEGVIDEO_PARSE (parse); + GstBuffer *buf = frame->buffer; + GstByteReader reader = GST_BYTE_READER_INIT_FROM_BUFFER (buf); + gint off = 0; + gboolean ret; + +retry: + /* at least start code and subsequent byte */ + if (G_UNLIKELY (GST_BUFFER_SIZE (buf) - off < 5)) + return FALSE; + + /* avoid stale cached parsing state */ + if (!(frame->flags & GST_BASE_PARSE_FRAME_FLAG_PARSING)) { + GST_LOG_OBJECT (mpvparse, "parsing new frame"); + gst_mpegv_parse_reset_frame (mpvparse); + frame->flags |= GST_BASE_PARSE_FRAME_FLAG_PARSING; + } else { + GST_LOG_OBJECT (mpvparse, "resuming frame parsing"); + } + + /* if already found a previous start code, e.g. start of frame, go for next */ + if (mpvparse->last_sc >= 0) { + off = mpvparse->last_sc; + goto next; + } + + off = gst_byte_reader_masked_scan_uint32 (&reader, 0xffffff00, 0x00000100, + off, GST_BUFFER_SIZE (buf) - off); + + GST_LOG_OBJECT (mpvparse, "possible sync at buffer offset %d", off); + + /* didn't find anything that looks like a sync word, skip */ + if (G_UNLIKELY (off < 0)) { + *skipsize = GST_BUFFER_SIZE (buf) - 3; + return FALSE; + } + + /* possible frame header, but not at offset 0? 
skip bytes before sync */ + if (G_UNLIKELY (off > 0)) { + *skipsize = off; + return FALSE; + } + + /* note: initial start code is assumed at offset 0 by subsequent code */ + + /* examine start code, see if it looks like an initial start code */ + if (gst_mpegv_parse_process_sc (mpvparse, buf, 0)) { + /* found sc */ + mpvparse->last_sc = 0; + } else { + off++; + goto retry; + } + +next: + /* start is fine as of now */ + *skipsize = 0; + /* position a bit further than last sc */ + off++; + /* so now we have start code at start of data; locate next start code */ + off = gst_byte_reader_masked_scan_uint32 (&reader, 0xffffff00, 0x00000100, + off, GST_BUFFER_SIZE (buf) - off); + + GST_LOG_OBJECT (mpvparse, "next start code at %d", off); + if (off < 0) { + /* if draining, take all */ + if (GST_BASE_PARSE_DRAINING (parse)) { + off = GST_BUFFER_SIZE (buf); + ret = TRUE; + } else { + /* resume scan where we left it */ + mpvparse->last_sc = GST_BUFFER_SIZE (buf) - 4; + /* request best next available */ + *framesize = G_MAXUINT; + return FALSE; + } + } else { + /* decide whether this startcode ends a frame */ + ret = gst_mpegv_parse_process_sc (mpvparse, buf, off); + } + + if (ret) { + *framesize = off; + } else { + goto next; + } + + return ret; +} + +static void +gst_mpegv_parse_update_src_caps (GstMpegvParse * mpvparse) +{ + GstCaps *caps = NULL; + + /* only update if no src caps yet or explicitly triggered */ + if (G_LIKELY (GST_PAD_CAPS (GST_BASE_PARSE_SRC_PAD (mpvparse)) && + !mpvparse->update_caps)) + return; + + /* carry over input caps as much as possible; override with our own stuff */ + caps = GST_PAD_CAPS (GST_BASE_PARSE_SINK_PAD (mpvparse)); + if (caps) { + caps = gst_caps_copy (caps); + } else { + caps = gst_caps_new_simple ("video/mpeg", NULL); + } + + /* typically we don't output buffers until we have properly parsed some + * config data, so we should at least know about version. 
+ * If not, it means it has been requested not to drop data, and + * upstream and/or app must know what they are doing ... */ + if (G_LIKELY (mpvparse->params.mpeg_version)) { + gst_caps_set_simple (caps, + "mpegversion", G_TYPE_INT, mpvparse->params.mpeg_version, + "interlaced", G_TYPE_BOOLEAN, !mpvparse->params.progressive, NULL); + } + + gst_caps_set_simple (caps, "systemstream", G_TYPE_BOOLEAN, FALSE, + "parsed", G_TYPE_BOOLEAN, TRUE, NULL); + + if (mpvparse->params.width > 0 && mpvparse->params.height > 0) { + gst_caps_set_simple (caps, "width", G_TYPE_INT, mpvparse->params.width, + "height", G_TYPE_INT, mpvparse->params.height, NULL); + } + + /* perhaps we have a framerate */ + if (mpvparse->params.fps_n > 0 && mpvparse->params.fps_d > 0) { + gint fps_num = mpvparse->params.fps_n; + gint fps_den = mpvparse->params.fps_d; + GstClockTime latency = gst_util_uint64_scale (GST_SECOND, fps_den, fps_num); + + gst_caps_set_simple (caps, "framerate", + GST_TYPE_FRACTION, fps_num, fps_den, NULL); + gst_base_parse_set_frame_rate (GST_BASE_PARSE (mpvparse), + fps_num, fps_den, 0, 0); + gst_base_parse_set_latency (GST_BASE_PARSE (mpvparse), latency, latency); + } + + /* or pixel-aspect-ratio */ + if (mpvparse->params.par_w && mpvparse->params.par_h > 0) { + gst_caps_set_simple (caps, "pixel-aspect-ratio", GST_TYPE_FRACTION, + mpvparse->params.par_w, mpvparse->params.par_h, NULL); + } + + if (mpvparse->config != NULL) { + gst_caps_set_simple (caps, "codec_data", + GST_TYPE_BUFFER, mpvparse->config, NULL); + } + + if (mpvparse->params.mpeg_version == 2) { + const guint profile_c = mpvparse->params.profile; + const guint level_c = mpvparse->params.level; + const gchar *profile = NULL, *level = NULL; + /* + * Profile indication - 1 => High, 2 => Spatially Scalable, + * 3 => SNR Scalable, 4 => Main, 5 => Simple + * 4:2:2 and Multi-view have profile = 0, with the escape bit set to 1 + */ + const gchar *profiles[] = { "high", "spatial", "snr", "main", "simple" }; + /* + * Level 
indication - 4 => High, 6 => High-1440, 8 => Main, 10 => Low, + * except in the case of profile = 0 + */ + const gchar *levels[] = { "high", "high-1440", "main", "low" }; + + if (profile_c > 0 && profile_c < 6) + profile = profiles[profile_c - 1]; + + if ((level_c > 3) && (level_c < 11) && (level_c % 2 == 0)) + level = levels[(level_c >> 1) - 1]; + + if (profile_c == 8) { + /* Non-hierarchical profile */ + switch (level_c) { + case 2: + level = levels[0]; + case 5: + level = levels[2]; + profile = "4:2:2"; + break; + case 10: + level = levels[0]; + case 11: + level = levels[1]; + case 13: + level = levels[2]; + case 14: + level = levels[3]; + profile = "multiview"; + break; + default: + break; + } + } + + /* FIXME does it make sense to expose profile/level in the caps ? */ + + if (profile) + gst_caps_set_simple (caps, "profile", G_TYPE_STRING, profile, NULL); + else + GST_DEBUG_OBJECT (mpvparse, "Invalid profile - %u", profile_c); + + if (level) + gst_caps_set_simple (caps, "level", G_TYPE_STRING, level, NULL); + else + GST_DEBUG_OBJECT (mpvparse, "Invalid level - %u", level_c); + + gst_caps_set_simple (caps, "interlaced", + G_TYPE_BOOLEAN, !mpvparse->params.progressive, NULL); + } + + gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (mpvparse), caps); + gst_caps_unref (caps); +} + +static GstFlowReturn +gst_mpegv_parse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame) +{ + GstMpegvParse *mpvparse = GST_MPEGVIDEO_PARSE (parse); + GstBuffer *buffer = frame->buffer; + + gst_mpegv_parse_update_src_caps (mpvparse); + + if (G_UNLIKELY (mpvparse->intra_frame)) + GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); + else + GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); + + /* maybe only sequence in this buffer, though not recommended, + * so mark it as such and force 0 duration */ + if (G_UNLIKELY (mpvparse->pic_offset < 0)) { + GST_DEBUG_OBJECT (mpvparse, "frame holds no picture data"); + frame->flags |= GST_BASE_PARSE_FRAME_FLAG_NO_FRAME; + 
GST_BUFFER_DURATION (buffer) = 0; + } + + if (G_UNLIKELY (mpvparse->drop && !mpvparse->config)) { + GST_DEBUG_OBJECT (mpvparse, "dropping frame as no config yet"); + return GST_BASE_PARSE_FLOW_DROPPED; + } else + return GST_FLOW_OK; +} + +static gboolean +gst_mpegv_parse_set_caps (GstBaseParse * parse, GstCaps * caps) +{ + GstMpegvParse *mpvparse = GST_MPEGVIDEO_PARSE (parse); + GstStructure *s; + const GValue *value; + GstBuffer *buf; + + GST_DEBUG_OBJECT (parse, "setcaps called with %" GST_PTR_FORMAT, caps); + + s = gst_caps_get_structure (caps, 0); + + if ((value = gst_structure_get_value (s, "codec_data")) != NULL + && (buf = gst_value_get_buffer (value))) { + /* best possible parse attempt, + * src caps are based on sink caps so it will end up in there + * whether sucessful or not */ + gst_mpegv_parse_process_config (mpvparse, GST_BUFFER_DATA (buf), + GST_BUFFER_SIZE (buf)); + } + + /* let's not interfere and accept regardless of config parsing success */ + return TRUE; +} diff --git a/gst/videoparsers/gstmpegvideoparse.h b/gst/videoparsers/gstmpegvideoparse.h new file mode 100644 index 0000000..a3706a4 --- /dev/null +++ b/gst/videoparsers/gstmpegvideoparse.h @@ -0,0 +1,75 @@ +/* GStreamer + * Copyright (C) <2007> Jan Schmidt <thaytan@mad.scientist.com> + * Copyright (C) <2011> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk> + * Copyright (C) <2011> Collabora Multimedia + * Copyright (C) <2011> Nokia Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. 
+ * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef __GST_MPEGVIDEO_PARSE_H__ +#define __GST_MPEGVIDEO_PARSE_H__ + +#include <gst/gst.h> +#include <gst/base/gstbaseparse.h> + +#include "mpegvideoparse.h" + +G_BEGIN_DECLS + +#define GST_TYPE_MPEGVIDEO_PARSE (gst_mpegv_parse_get_type()) +#define GST_MPEGVIDEO_PARSE(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),\ + GST_TYPE_MPEGVIDEO_PARSE, GstMpegvParse)) +#define GST_MPEGVIDEO_PARSE_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),\ + GST_TYPE_MPEGVIDEO_PARSE, GstMpegvParseClass)) +#define GST_MPEGVIDEO_PARSE_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj),\ + GST_TYPE_MPEGVIDEO_PARSE, GstMpegvParseClass)) +#define GST_IS_MPEGVIDEO_PARSE(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),\ + GST_TYPE_MPEGVIDEO_PARSE)) +#define GST_IS_MPEGVIDEO_PARSE_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),\ + GST_TYPE_MPEGVIDEO_PARSE)) + +typedef struct _GstMpegvParse GstMpegvParse; +typedef struct _GstMpegvParseClass GstMpegvParseClass; + +struct _GstMpegvParse { + GstBaseParse element; + + /* parse state */ + gint last_sc; + gint seq_offset; + gint pic_offset; + gboolean intra_frame; + gboolean update_caps; + + GstBuffer *config; + guint8 profile; + MPEGVParams params; + + /* properties */ + gboolean drop; + gboolean gop_split; +}; + +struct _GstMpegvParseClass { + GstBaseParseClass parent_class; +}; + +GType gst_mpegv_parse_get_type (void); + +G_END_DECLS + +#endif /* __GST_MPEGVIDEO_PARSE_H__ */ diff --git a/gst/videoparsers/gstvc1parse.c b/gst/videoparsers/gstvc1parse.c new file mode 100644 index 0000000..be94649 --- /dev/null +++ b/gst/videoparsers/gstvc1parse.c @@ -0,0 +1,1544 @@ +/* + * Copyright (C) 2011, Hewlett-Packard Development Company, L.P. + * Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>, Collabora Ltd. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +/* + * Information about the caps fields: + * + * header-format: + * none: No codec_data and only in-stream headers + * + * asf: codec_data as specified in the ASF specification + * Simple/Main profile: 4 byte sequence header without startcode + * Advanced profile: Sequence header and entrypoint with startcodes + * + * sequence-layer: codec_data as specified in SMPTE 421M Annex L.2 + * + * + * stream-format: + * bdu: BDUs with startcodes + * + * bdu-frame: BDUs with startcodes, everything up to and including a frame + * per buffer. This also means everything needed to decode a frame, i.e. + * field and slice BDUs + * + * sequence-layer-bdu: Sequence layer in first buffer, then BDUs with startcodes + * + * sequence-layer-bdu-frame: Sequence layer in first buffer, then only frame + * BDUs with startcodes, i.e. everything up to and including a frame. + * + * sequence-layer-raw-frame: Sequence layer in first buffer, then only frame + * BDUs without startcodes. Only for simple/main profile. + * + * sequence-layer-frame-layer: As specified in SMPTE 421M Annex L, sequence-layer + * first, then BDUs inside frame-layer + * + * asf: As specified in the ASF specification. 
+ * For simple/main profile a single frame BDU without startcodes per buffer + * For advanced profile one or many BDUs with/without startcodes: + * Startcodes required if non-frame BDU or multiple BDUs per buffer + * unless frame BDU followed by field BDU. In that case only second (field) + * startcode required. + * + * frame-layer: As specified in SMPTE 421M Annex L.2 + * + * + * If no stream-format is given in the caps we do the following: + * + * 0) If header-format=asf we assume stream-format=asf + * 1) If first buffer starts with sequence header startcode + * we assume stream-format=bdu (or bdu-frame, doesn't matter + * for the input because we're parsing anyway) + * 2) If first buffer starts with sequence layer startcode + * 1) If followed by sequence header or frame startcode + * we assume stream-format=sequence-layer-bdu (or -bdu-frame, + * doesn't matter for the input because we're parsing anyway) + * 2) Otherwise we assume stream-format=sequence-layer-frame-layer + * 3) Otherwise + * 1) If header-format=sequence-layer we assume stream-format=frame-layer + * 2) If header-format=none we error out + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "gstvc1parse.h" + +#include <gst/base/gstbytereader.h> +#include <string.h> + +GST_DEBUG_CATEGORY (vc1_parse_debug); +#define GST_CAT_DEFAULT vc1_parse_debug + +#define GST_BUFFER_FLAG_B_FRAME (GST_BUFFER_FLAG_LAST << 0) + +static const struct +{ + gchar str[15]; + VC1HeaderFormat en; +} header_formats[] = { + { + "none", VC1_HEADER_FORMAT_NONE}, { + "asf", VC1_HEADER_FORMAT_ASF}, { + "sequence-layer", VC1_HEADER_FORMAT_SEQUENCE_LAYER} +}; + +static const struct +{ + gchar str[27]; + VC1StreamFormat en; +} stream_formats[] = { + { + "bdu", VC1_STREAM_FORMAT_BDU}, { + "bdu-frame", VC1_STREAM_FORMAT_BDU_FRAME}, { + "sequence-layer-bdu", VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU}, { + "sequence-layer-bdu-frame", VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME}, { + "sequence-layer-raw-frame", 
VC1_STREAM_FORMAT_SEQUENCE_LAYER_RAW_FRAME}, { + "sequence-layer-frame-layer", VC1_STREAM_FORMAT_SEQUENCE_LAYER_FRAME_LAYER}, { + "asf", VC1_STREAM_FORMAT_ASF}, { + "frame-layer", VC1_STREAM_FORMAT_FRAME_LAYER} +}; + +static const gchar * +stream_format_to_string (VC1StreamFormat stream_format) +{ + return stream_formats[stream_format].str; +} + +static VC1StreamFormat +stream_format_from_string (const gchar * stream_format) +{ + gint i; + + for (i = 0; i < G_N_ELEMENTS (stream_formats); i++) { + if (strcmp (stream_formats[i].str, stream_format) == 0) + return stream_formats[i].en; + } + return -1; +} + +static const gchar * +header_format_to_string (VC1HeaderFormat header_format) +{ + return header_formats[header_format].str; +} + +static VC1HeaderFormat +header_format_from_string (const gchar * header_format) +{ + gint i; + + for (i = 0; i < G_N_ELEMENTS (header_formats); i++) { + if (strcmp (header_formats[i].str, header_format) == 0) + return header_formats[i].en; + } + return -1; +} + +static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink", + GST_PAD_SINK, + GST_PAD_ALWAYS, + GST_STATIC_CAPS ("video/x-wmv, wmvversion=(int) 3, " + "format=(fourcc) {WVC1, WMV3}")); + +static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src", + GST_PAD_SRC, + GST_PAD_ALWAYS, + GST_STATIC_CAPS ("video/x-wmv, wmvversion=(int) 3, " + "format=(fourcc) {WVC1, WMV3}, " + "stream-format=(string) {bdu, bdu-frame, sequence-layer-bdu, " + "sequence-layer-bdu-frame, sequence-layer-raw-frame, " + "sequence-layer-frame-layer, asf, frame-layer}, " + "header-format=(string) {none, asf, sequence-layer}")); + + +GST_BOILERPLATE (GstVC1Parse, gst_vc1_parse, GstBaseParse, GST_TYPE_BASE_PARSE); + +static void gst_vc1_parse_finalize (GObject * object); + +static gboolean gst_vc1_parse_start (GstBaseParse * parse); +static gboolean gst_vc1_parse_stop (GstBaseParse * parse); +static gboolean gst_vc1_parse_check_valid_frame (GstBaseParse * parse, + GstBaseParseFrame * 
frame, guint * framesize, gint * skipsize); +static GstFlowReturn gst_vc1_parse_parse_frame (GstBaseParse * parse, + GstBaseParseFrame * frame); +static GstFlowReturn gst_vc1_parse_pre_push_frame (GstBaseParse * parse, + GstBaseParseFrame * frame); +static gboolean gst_vc1_parse_set_caps (GstBaseParse * parse, GstCaps * caps); +static GstCaps *gst_vc1_parse_get_sink_caps (GstBaseParse * parse); +static GstFlowReturn gst_vc1_parse_detect (GstBaseParse * parse, + GstBuffer * buffer); + +static void gst_vc1_parse_reset (GstVC1Parse * vc1parse); +static gboolean gst_vc1_parse_handle_seq_layer (GstVC1Parse * vc1parse, + GstBuffer * buf, guint offset, guint size); +static gboolean gst_vc1_parse_handle_seq_hdr (GstVC1Parse * vc1parse, + GstBuffer * buf, guint offset, guint size); +static gboolean gst_vc1_parse_handle_entrypoint (GstVC1Parse * vc1parse, + GstBuffer * buf, guint offset, guint size); +static void gst_vc1_parse_update_stream_format_properties (GstVC1Parse * + vc1parse); + +static void +gst_vc1_parse_base_init (gpointer g_class) +{ + GstElementClass *gstelement_class = GST_ELEMENT_CLASS (g_class); + + gst_element_class_add_pad_template (gstelement_class, + gst_static_pad_template_get (&srctemplate)); + gst_element_class_add_pad_template (gstelement_class, + gst_static_pad_template_get (&sinktemplate)); + + gst_element_class_set_details_simple (gstelement_class, "VC1 parser", + "Codec/Parser/Converter/Video", + "Parses VC1 streams", + "Sebastian Dröge <sebastian.droege@collabora.co.uk>"); + + GST_DEBUG_CATEGORY_INIT (vc1_parse_debug, "vc1parse", 0, "vc1 parser"); +} + +static void +gst_vc1_parse_class_init (GstVC1ParseClass * klass) +{ + GObjectClass *gobject_class = (GObjectClass *) klass; + GstBaseParseClass *parse_class = GST_BASE_PARSE_CLASS (klass); + + gobject_class->finalize = gst_vc1_parse_finalize; + + parse_class->start = GST_DEBUG_FUNCPTR (gst_vc1_parse_start); + parse_class->stop = GST_DEBUG_FUNCPTR (gst_vc1_parse_stop); + 
parse_class->check_valid_frame = + GST_DEBUG_FUNCPTR (gst_vc1_parse_check_valid_frame); + parse_class->parse_frame = GST_DEBUG_FUNCPTR (gst_vc1_parse_parse_frame); + parse_class->pre_push_frame = + GST_DEBUG_FUNCPTR (gst_vc1_parse_pre_push_frame); + parse_class->set_sink_caps = GST_DEBUG_FUNCPTR (gst_vc1_parse_set_caps); + parse_class->get_sink_caps = GST_DEBUG_FUNCPTR (gst_vc1_parse_get_sink_caps); + parse_class->detect = GST_DEBUG_FUNCPTR (gst_vc1_parse_detect); +} + +static void +gst_vc1_parse_init (GstVC1Parse * vc1parse, GstVC1ParseClass * g_class) +{ + /* Default values for stream-format=raw, i.e. + * raw VC1 frames with startcodes */ + gst_base_parse_set_syncable (GST_BASE_PARSE (vc1parse), TRUE); + gst_base_parse_set_has_timing_info (GST_BASE_PARSE (vc1parse), FALSE); + + gst_vc1_parse_reset (vc1parse); +} + +static void +gst_vc1_parse_finalize (GObject * object) +{ + /*GstVC1Parse *vc1parse = GST_VC1_PARSE (object); */ + + G_OBJECT_CLASS (parent_class)->finalize (object); +} + +static void +gst_vc1_parse_reset (GstVC1Parse * vc1parse) +{ + vc1parse->profile = -1; + vc1parse->level = -1; + vc1parse->fourcc = 0; + vc1parse->width = 0; + vc1parse->height = 0; + vc1parse->fps_n = vc1parse->fps_d = 0; + vc1parse->frame_duration = GST_CLOCK_TIME_NONE; + vc1parse->fps_from_caps = FALSE; + vc1parse->par_n = vc1parse->par_d = 0; + vc1parse->par_from_caps = FALSE; + + vc1parse->renegotiate = TRUE; + vc1parse->update_caps = TRUE; + + vc1parse->input_header_format = VC1_HEADER_FORMAT_NONE; + vc1parse->input_stream_format = VC1_STREAM_FORMAT_BDU; + vc1parse->output_header_format = VC1_HEADER_FORMAT_NONE; + vc1parse->output_stream_format = VC1_STREAM_FORMAT_BDU; + gst_buffer_replace (&vc1parse->seq_layer_buffer, NULL); + gst_buffer_replace (&vc1parse->seq_hdr_buffer, NULL); + gst_buffer_replace (&vc1parse->entrypoint_buffer, NULL); +} + +static gboolean +gst_vc1_parse_start (GstBaseParse * parse) +{ + GstVC1Parse *vc1parse = GST_VC1_PARSE (parse); + + GST_DEBUG_OBJECT 
(parse, "start"); + gst_vc1_parse_reset (vc1parse); + + vc1parse->detecting_stream_format = TRUE; + + return TRUE; +} + +static gboolean +gst_vc1_parse_stop (GstBaseParse * parse) +{ + GstVC1Parse *vc1parse = GST_VC1_PARSE (parse); + + GST_DEBUG_OBJECT (parse, "stop"); + gst_vc1_parse_reset (vc1parse); + + return TRUE; +} + +static gboolean +gst_vc1_parse_renegotiate (GstVC1Parse * vc1parse) +{ + GstCaps *allowed_caps; + + if (!vc1parse->renegotiate) + return TRUE; + + /* Negotiate with downstream here */ + GST_DEBUG_OBJECT (vc1parse, "Renegotiating"); + + allowed_caps = gst_pad_get_allowed_caps (GST_BASE_PARSE_SRC_PAD (vc1parse)); + if (allowed_caps && !gst_caps_is_empty (allowed_caps) + && !gst_caps_is_any (allowed_caps)) { + GstStructure *s; + const gchar *stream_format, *header_format; + + GST_DEBUG_OBJECT (vc1parse, "Downstream allowed caps: %" GST_PTR_FORMAT, + allowed_caps); + + allowed_caps = gst_caps_make_writable (allowed_caps); + gst_caps_truncate (allowed_caps); + s = gst_caps_get_structure (allowed_caps, 0); + + /* If already fixed this does nothing */ + gst_structure_fixate_field_string (s, "header-format", "asf"); + header_format = gst_structure_get_string (s, "header-format"); + if (!header_format) { + vc1parse->output_header_format = vc1parse->input_header_format; + header_format = header_format_to_string (vc1parse->output_header_format); + gst_structure_set (s, "header-format", G_TYPE_STRING, header_format, + NULL); + } else { + vc1parse->output_header_format = + header_format_from_string (header_format); + } + + /* If already fixed this does nothing */ + gst_structure_fixate_field_string (s, "stream-format", "asf"); + stream_format = gst_structure_get_string (s, "stream-format"); + if (!stream_format) { + vc1parse->output_stream_format = vc1parse->input_stream_format; + stream_format = stream_format_to_string (vc1parse->output_stream_format); + gst_structure_set (s, "stream-format", G_TYPE_STRING, stream_format, + NULL); + } else { + 
vc1parse->output_stream_format = + stream_format_from_string (stream_format); + } + } else if (allowed_caps && gst_caps_is_empty (allowed_caps)) { + GST_ERROR_OBJECT (vc1parse, "Empty caps"); + return FALSE; + } else { + GST_DEBUG_OBJECT (vc1parse, "Using input header/stream format"); + vc1parse->output_header_format = vc1parse->input_header_format; + vc1parse->output_stream_format = vc1parse->input_stream_format; + } + + if (allowed_caps) + gst_caps_unref (allowed_caps); + vc1parse->renegotiate = FALSE; + vc1parse->update_caps = TRUE; + + GST_INFO_OBJECT (vc1parse, "input %s/%s, negotiated %s/%s with downstream", + header_format_to_string (vc1parse->input_header_format), + stream_format_to_string (vc1parse->input_stream_format), + header_format_to_string (vc1parse->output_header_format), + stream_format_to_string (vc1parse->output_stream_format)); + + return TRUE; +} + +static GstCaps * +gst_vc1_parse_get_sink_caps (GstBaseParse * parse) +{ + GstCaps *peercaps; + const GstCaps *templ; + GstCaps *ret; + + templ = gst_pad_get_pad_template_caps (GST_BASE_PARSE_SINK_PAD (parse)); + peercaps = gst_pad_peer_get_caps (GST_BASE_PARSE_SRC_PAD (parse)); + if (peercaps) { + guint i, n; + GstStructure *s; + + /* Remove the stream-format and header-format fields + * and add the generic ones again by intersecting + * with our template */ + peercaps = gst_caps_make_writable (peercaps); + n = gst_caps_get_size (peercaps); + for (i = 0; i < n; i++) { + s = gst_caps_get_structure (peercaps, i); + + gst_structure_remove_field (s, "stream-format"); + gst_structure_remove_field (s, "header-format"); + } + + ret = gst_caps_intersect_full (peercaps, templ, GST_CAPS_INTERSECT_FIRST); + gst_caps_unref (peercaps); + } else { + ret = gst_caps_copy (templ); + } + + return ret; +} + +static GstFlowReturn +gst_vc1_parse_detect (GstBaseParse * parse, GstBuffer * buffer) +{ + GstVC1Parse *vc1parse = GST_VC1_PARSE (parse); + guint8 *data; + gint size; + + if (!vc1parse->detecting_stream_format) + 
return GST_FLOW_OK; + + data = GST_BUFFER_DATA (buffer); + size = GST_BUFFER_SIZE (buffer); + +#if 0 + /* FIXME: disable BDU check for now as BDU parsing needs more work. + */ + while (size >= 4) { + guint32 startcode = GST_READ_UINT32_BE (data); + + if ((startcode & 0xffffff00) == 0x00000100) { + GST_DEBUG_OBJECT (vc1parse, "Found BDU startcode"); + vc1parse->input_stream_format = VC1_STREAM_FORMAT_BDU_FRAME; + goto detected; + } + + data += 4; + size -= 4; + } +#endif + + data = GST_BUFFER_DATA (buffer); + size = GST_BUFFER_SIZE (buffer); + while (size >= 40) { + if (data[3] == 0xc5 && GST_READ_UINT32_BE (data + 4) == 0x00000004 && + GST_READ_UINT32_BE (data + 20) == 0x0000000c) { + guint32 startcode; + + GST_DEBUG_OBJECT (vc1parse, "Found sequence layer"); + startcode = GST_READ_UINT32_BE (data + 36); + if ((startcode & 0xffffff00) == 0x00000100) { + GST_DEBUG_OBJECT (vc1parse, "Found BDU startcode after sequence layer"); + vc1parse->input_stream_format = + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME; + goto detected; + } else { + GST_DEBUG_OBJECT (vc1parse, + "Assuming sequence-layer-frame-layer stream format"); + vc1parse->input_stream_format = + VC1_STREAM_FORMAT_SEQUENCE_LAYER_FRAME_LAYER; + goto detected; + } + } + data += 4; + size -= 4; + } + + if (GST_BUFFER_SIZE (buffer) <= 128) { + GST_DEBUG_OBJECT (vc1parse, "Requesting more data"); + return GST_FLOW_NOT_NEGOTIATED; + } + + if (GST_BASE_PARSE_DRAINING (vc1parse)) { + GST_ERROR_OBJECT (vc1parse, "Failed to detect or assume a stream format " + "and draining now"); + return GST_FLOW_ERROR; + } + + /* Otherwise we try some heuristics */ + if (vc1parse->input_header_format == VC1_HEADER_FORMAT_ASF) { + GST_DEBUG_OBJECT (vc1parse, "Assuming ASF stream format"); + vc1parse->input_stream_format = VC1_STREAM_FORMAT_ASF; + goto detected; + } else if (vc1parse->input_header_format == VC1_HEADER_FORMAT_SEQUENCE_LAYER) { + GST_DEBUG_OBJECT (vc1parse, "Assuming frame-layer stream format"); + 
vc1parse->input_stream_format = VC1_STREAM_FORMAT_FRAME_LAYER; + goto detected; + } else { + GST_ERROR_OBJECT (vc1parse, "Can't detect or assume a stream format"); + return GST_FLOW_ERROR; + } + + g_assert_not_reached (); + return GST_FLOW_ERROR; + +detected: + vc1parse->detecting_stream_format = FALSE; + gst_vc1_parse_update_stream_format_properties (vc1parse); + return GST_FLOW_OK; +} + +static gboolean +gst_vc1_parse_check_valid_frame (GstBaseParse * parse, + GstBaseParseFrame * frame, guint * framesize, gint * skipsize) +{ + GstVC1Parse *vc1parse = GST_VC1_PARSE (parse); + GstBuffer *buffer = frame->buffer; + guint8 *data = GST_BUFFER_DATA (buffer); + guint size = GST_BUFFER_SIZE (buffer); + + if (vc1parse->renegotiate) { + if (!gst_vc1_parse_renegotiate (vc1parse)) { + GST_ERROR_OBJECT (vc1parse, "Failed to negotiate with downstream"); + return FALSE; + } + } + + if (!vc1parse->seq_layer_buffer + && (vc1parse->input_stream_format == VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_RAW_FRAME + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_FRAME_LAYER)) { + if (data[3] == 0xc5 && GST_READ_UINT32_BE (data + 4) == 0x00000004 + && GST_READ_UINT32_BE (data + 20) == 0x0000000c) { + *framesize = 36; + return TRUE; + } + return FALSE; + } else if (vc1parse->input_stream_format == VC1_STREAM_FORMAT_BDU || + vc1parse->input_stream_format == VC1_STREAM_FORMAT_BDU_FRAME || + (vc1parse->seq_layer_buffer + && (vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME))) { + GstVC1ParserResult pres; + GstVC1BDU bdu; + g_assert (size >= 4); + memset (&bdu, 0, sizeof (bdu)); + GST_DEBUG_OBJECT (vc1parse, + "Handling buffer of size %u at offset %" G_GUINT64_FORMAT, size, + GST_BUFFER_OFFSET (buffer)); + /* XXX: when a 
buffer contains multiple BDUs, does the first one start with + * a startcode? + */ + pres = gst_vc1_identify_next_bdu (data, size, &bdu); + switch (pres) { + case GST_VC1_PARSER_OK: + GST_DEBUG_OBJECT (vc1parse, "Have complete BDU"); + if (bdu.sc_offset > 4) { + *skipsize = bdu.sc_offset; + return FALSE; + } else { + *framesize = bdu.offset + bdu.size; + return TRUE; + } + break; + case GST_VC1_PARSER_BROKEN_DATA: + GST_ERROR_OBJECT (vc1parse, "Broken data"); + return FALSE; + break; + case GST_VC1_PARSER_NO_BDU: + GST_DEBUG_OBJECT (vc1parse, "Found no BDU startcode"); + *skipsize = size - 3; + return FALSE; + break; + case GST_VC1_PARSER_NO_BDU_END: + GST_DEBUG_OBJECT (vc1parse, "Found no BDU end"); + if (G_UNLIKELY (GST_BASE_PARSE_DRAINING (vc1parse))) { + GST_DEBUG_OBJECT (vc1parse, "Draining - assuming complete frame"); + *framesize = size; + return TRUE; + } else { + *skipsize = 0; + /* Request all that is available */ + *framesize = G_MAXUINT; + return FALSE; + } + break; + case GST_VC1_PARSER_ERROR: + GST_ERROR_OBJECT (vc1parse, "Parsing error"); + return FALSE; + break; + default: + g_assert_not_reached (); + break; + } + } else if (vc1parse->input_stream_format == VC1_STREAM_FORMAT_ASF || + (vc1parse->seq_layer_buffer + && vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_RAW_FRAME)) { + /* Must be packetized already */ + *framesize = size; + return TRUE; + } else { + /* frame-layer or sequence-layer-frame-layer */ + g_assert (size >= 8); + /* Parse frame layer size */ + *framesize = GST_READ_UINT24_BE (data + 1) + 8; + return TRUE; + } + + return FALSE; +} + +static gboolean +gst_vc1_parse_update_caps (GstVC1Parse * vc1parse) +{ + GstCaps *caps; + GstVC1Profile profile; + if (GST_PAD_CAPS (GST_BASE_PARSE_SRC_PAD (vc1parse)) + && !vc1parse->update_caps) + return TRUE; + caps = gst_caps_new_simple ("video/x-wmv", "wmvversion", G_TYPE_INT, 3, NULL); + /* Must have this here from somewhere */ + g_assert (vc1parse->width != 0 && 
vc1parse->height != 0); + gst_caps_set_simple (caps, "width", G_TYPE_INT, vc1parse->width, "height", + G_TYPE_INT, vc1parse->height, NULL); + if (vc1parse->fps_d != 0) { + gst_caps_set_simple (caps, "framerate", GST_TYPE_FRACTION, vc1parse->fps_n, + vc1parse->fps_d, NULL); + + vc1parse->frame_duration = gst_util_uint64_scale (GST_SECOND, + vc1parse->fps_d, vc1parse->fps_n); + } + if (vc1parse->par_n != 0 && vc1parse->par_d != 0) + gst_caps_set_simple (caps, "pixel-aspect-ratio", GST_TYPE_FRACTION, + vc1parse->par_n, vc1parse->par_d, NULL); + if (vc1parse->seq_hdr_buffer) + profile = vc1parse->seq_hdr.profile; + else if (vc1parse->seq_layer_buffer) + profile = vc1parse->seq_layer.struct_c.profile; + else + g_assert_not_reached (); + if (profile == GST_VC1_PROFILE_ADVANCED) { + const gchar *level; + /* Caller must make sure this is valid here */ + g_assert (vc1parse->seq_hdr_buffer); + switch ((GstVC1Level) vc1parse->seq_hdr.advanced.level) { + case GST_VC1_LEVEL_L0: + level = "0"; + break; + case GST_VC1_LEVEL_L1: + level = "1"; + break; + case GST_VC1_LEVEL_L2: + level = "2"; + break; + case GST_VC1_LEVEL_L3: + level = "3"; + break; + case GST_VC1_LEVEL_L4: + level = "4"; + break; + default: + g_assert_not_reached (); + break; + } + + gst_caps_set_simple (caps, "format", GST_TYPE_FOURCC, + GST_MAKE_FOURCC ('W', 'V', 'C', '1'), + "profile", G_TYPE_STRING, "advanced", + "level", G_TYPE_STRING, level, NULL); + } else + if (profile == GST_VC1_PROFILE_SIMPLE + || profile == GST_VC1_PROFILE_MAIN) { + const gchar *profile_str; + if (profile == GST_VC1_PROFILE_SIMPLE) + profile_str = "simple"; + else + profile_str = "main"; + gst_caps_set_simple (caps, "format", GST_TYPE_FOURCC, + GST_MAKE_FOURCC ('W', 'M', 'V', '3'), + "profile", G_TYPE_STRING, profile_str, NULL); + if (vc1parse->seq_layer_buffer) { + const gchar *level; + switch (vc1parse->seq_layer.struct_b.level) { + case GST_VC1_LEVEL_LOW: + level = "low"; + break; + case GST_VC1_LEVEL_MEDIUM: + level = "medium"; + 
break; + case GST_VC1_LEVEL_HIGH: + level = "high"; + break; + default: + g_assert_not_reached (); + break; + } + + gst_caps_set_simple (caps, "level", G_TYPE_STRING, level, NULL); + } + } else { + g_assert_not_reached (); + } + + switch (vc1parse->output_header_format) { + case VC1_HEADER_FORMAT_ASF: + if (vc1parse->profile != GST_VC1_PROFILE_ADVANCED) { + GstBuffer *codec_data; + if (vc1parse->seq_hdr_buffer) { + codec_data = gst_buffer_create_sub (vc1parse->seq_hdr_buffer, 0, 4); + } else { + guint32 seq_hdr = 0; + /* Build simple/main sequence header from sequence layer */ + seq_hdr |= (vc1parse->profile << 30); + seq_hdr |= (vc1parse->seq_layer.struct_c.wmvp << 28); + seq_hdr |= (vc1parse->seq_layer.struct_c.frmrtq_postproc << 25); + seq_hdr |= (vc1parse->seq_layer.struct_c.bitrtq_postproc << 20); + seq_hdr |= (vc1parse->seq_layer.struct_c.loop_filter << 19); + seq_hdr |= (vc1parse->seq_layer.struct_c.multires << 17); + seq_hdr |= (vc1parse->seq_layer.struct_c.fastuvmc << 15); + seq_hdr |= (vc1parse->seq_layer.struct_c.extended_mv << 14); + seq_hdr |= (vc1parse->seq_layer.struct_c.dquant << 12); + seq_hdr |= (vc1parse->seq_layer.struct_c.vstransform << 11); + seq_hdr |= (vc1parse->seq_layer.struct_c.overlap << 9); + seq_hdr |= (vc1parse->seq_layer.struct_c.syncmarker << 8); + seq_hdr |= (vc1parse->seq_layer.struct_c.rangered << 7); + seq_hdr |= (vc1parse->seq_layer.struct_c.maxbframes << 4); + seq_hdr |= (vc1parse->seq_layer.struct_c.quantizer << 2); + seq_hdr |= (vc1parse->seq_layer.struct_c.finterpflag << 1); + codec_data = gst_buffer_new_and_alloc (4); + GST_WRITE_UINT32_BE (GST_BUFFER_DATA (codec_data), seq_hdr); + } + + gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, codec_data, + NULL); + gst_buffer_unref (codec_data); + } else { + GstBuffer *codec_data; + /* Should have seqhdr and entrypoint for the advanced profile here */ + g_assert (vc1parse->seq_hdr_buffer && vc1parse->entrypoint_buffer); + codec_data = + gst_buffer_new_and_alloc (1 + 4 + 
+ GST_BUFFER_SIZE (vc1parse->seq_hdr_buffer) + 4 + + GST_BUFFER_SIZE (vc1parse->entrypoint_buffer)); + if (vc1parse->profile == GST_VC1_PROFILE_SIMPLE) + GST_WRITE_UINT8 (GST_BUFFER_DATA (codec_data), 0x29); + else + GST_WRITE_UINT8 (GST_BUFFER_DATA (codec_data), 0x2b); + GST_WRITE_UINT32_BE (GST_BUFFER_DATA (codec_data) + 1, 0x0000010f); + memcpy (GST_BUFFER_DATA (codec_data) + 1 + 4, + GST_BUFFER_DATA (vc1parse->seq_hdr_buffer), + GST_BUFFER_SIZE (vc1parse->seq_hdr_buffer)); + GST_WRITE_UINT32_BE (GST_BUFFER_DATA (codec_data) + 1 + 4 + + GST_BUFFER_SIZE (vc1parse->seq_hdr_buffer), 0x0000010e); + memcpy (GST_BUFFER_DATA (codec_data) + 1 + 4 + + GST_BUFFER_SIZE (vc1parse->seq_hdr_buffer) + 4, + GST_BUFFER_DATA (vc1parse->entrypoint_buffer), + GST_BUFFER_SIZE (vc1parse->entrypoint_buffer)); + gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, codec_data, + NULL); + gst_buffer_unref (codec_data); + } + break; + case VC1_HEADER_FORMAT_SEQUENCE_LAYER: + if (vc1parse->seq_layer_buffer) { + gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, + vc1parse->seq_layer_buffer, NULL); + } else { + GstBuffer *codec_data = gst_buffer_new_and_alloc (36); + guint8 *data = GST_BUFFER_DATA (codec_data); + guint32 structC = 0; + /* Unknown number of frames and start code */ + data[0] = 0xff; + data[1] = 0xff; + data[2] = 0xff; + data[3] = 0xc5; + /* structC */ + structC |= (vc1parse->profile << 30); + if (vc1parse->profile != GST_VC1_PROFILE_ADVANCED) { + structC |= (vc1parse->seq_layer.struct_c.wmvp << 28); + structC |= (vc1parse->seq_layer.struct_c.frmrtq_postproc << 25); + structC |= (vc1parse->seq_layer.struct_c.bitrtq_postproc << 20); + structC |= (vc1parse->seq_layer.struct_c.loop_filter << 19); + structC |= (vc1parse->seq_layer.struct_c.multires << 17); + structC |= (vc1parse->seq_layer.struct_c.fastuvmc << 15); + structC |= (vc1parse->seq_layer.struct_c.extended_mv << 14); + structC |= (vc1parse->seq_layer.struct_c.dquant << 12); + structC |= 
(vc1parse->seq_layer.struct_c.vstransform << 11); + structC |= (vc1parse->seq_layer.struct_c.overlap << 9); + structC |= (vc1parse->seq_layer.struct_c.syncmarker << 8); + structC |= (vc1parse->seq_layer.struct_c.rangered << 7); + structC |= (vc1parse->seq_layer.struct_c.maxbframes << 4); + structC |= (vc1parse->seq_layer.struct_c.quantizer << 2); + structC |= (vc1parse->seq_layer.struct_c.finterpflag << 1); + } + GST_WRITE_UINT32_BE (data + 4, structC); + /* 0x00000004 */ + GST_WRITE_UINT32_BE (data + 8, 4); + /* structA */ + if (vc1parse->profile != GST_VC1_PROFILE_ADVANCED) { + GST_WRITE_UINT32_BE (data + 12, vc1parse->height); + GST_WRITE_UINT32_BE (data + 16, vc1parse->width); + } else { + GST_WRITE_UINT32_BE (data + 12, 0); + GST_WRITE_UINT32_BE (data + 16, 0); + } + + /* 0x0000000c */ + GST_WRITE_UINT32_BE (data + 20, 0x0000000c); + /* structB */ + if (vc1parse->level != -1) + data[24] = (vc1parse->level << 5); + else + data[24] = 0x40; /* Use HIGH level */ + /* Unknown HRD_BUFFER */ + GST_WRITE_UINT24_BE (data + 25, 0); + /* Unknown HRD_RATE */ + GST_WRITE_UINT32_BE (data + 28, 0); + /* Framerate */ + GST_WRITE_UINT32_BE (data + 32, + ((guint32) (((gdouble) vc1parse->fps_n) / + ((gdouble) vc1parse->fps_d) + 0.5))); + gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, codec_data, + NULL); + gst_buffer_unref (codec_data); + } + break; + case VC1_HEADER_FORMAT_NONE: + default: + /* Nothing here */ + break; + } + + GST_DEBUG_OBJECT (vc1parse, "Setting caps %" GST_PTR_FORMAT, caps); + gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (vc1parse), caps); + gst_caps_unref (caps); + vc1parse->update_caps = FALSE; + return TRUE; +} + +static gboolean +gst_vc1_parse_handle_bdu (GstVC1Parse * vc1parse, GstVC1StartCode startcode, + GstBuffer * buffer, guint offset, guint size) +{ + GST_DEBUG_OBJECT (vc1parse, "Handling BDU with startcode 0x%02x", startcode); + + switch (startcode) { + case GST_VC1_SEQUENCE:{ + GST_DEBUG_OBJECT (vc1parse, "Have new SequenceHeader header"); 
+ if (!gst_vc1_parse_handle_seq_hdr (vc1parse, buffer, offset, size)) { + GST_ERROR_OBJECT (vc1parse, "Invalid VC1 sequence header"); + return FALSE; + } + break; + } + case GST_VC1_ENTRYPOINT: + GST_DEBUG_OBJECT (vc1parse, "Have new EntryPoint header"); + if (!gst_vc1_parse_handle_entrypoint (vc1parse, buffer, offset, size)) { + GST_ERROR_OBJECT (vc1parse, "Invalid VC1 entrypoint"); + return FALSE; + } + break; + case GST_VC1_FRAME: + /* TODO: Check if keyframe */ + break; + default: + break; + } + + return TRUE; +} + +static gboolean +gst_vc1_parse_handle_bdus (GstVC1Parse * vc1parse, GstBuffer * buffer, + guint offset, guint size) +{ + GstVC1BDU bdu; + GstVC1ParserResult pres; + guint8 *data = GST_BUFFER_DATA (buffer) + offset; + + do { + memset (&bdu, 0, sizeof (bdu)); + pres = gst_vc1_identify_next_bdu (data, size, &bdu); + if (pres == GST_VC1_PARSER_OK || pres == GST_VC1_PARSER_NO_BDU_END) { + if (pres == GST_VC1_PARSER_NO_BDU_END) { + pres = GST_VC1_PARSER_OK; + bdu.size = size - bdu.offset; + } + + data += bdu.offset; + size -= bdu.offset; + + if (!gst_vc1_parse_handle_bdu (vc1parse, bdu.type, buffer, + data - GST_BUFFER_DATA (buffer), bdu.size)) + return FALSE; + + data += bdu.size; + size -= bdu.size; + } + } while (pres == GST_VC1_PARSER_OK && size > 0); + + if (pres != GST_VC1_PARSER_OK) { + GST_DEBUG_OBJECT (vc1parse, "Failed to parse BDUs"); + return FALSE; + } + return TRUE; +} + +static GstFlowReturn +gst_vc1_parse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame) +{ + GstVC1Parse *vc1parse = GST_VC1_PARSE (parse); + GstBuffer *buffer = frame->buffer; + guint8 *data = GST_BUFFER_DATA (buffer); + guint size = GST_BUFFER_SIZE (buffer); + + if (!vc1parse->seq_layer_buffer + && (vc1parse->input_stream_format == VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_RAW_FRAME + || 
vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_FRAME_LAYER)) { + g_assert (size >= 36); + if (!gst_vc1_parse_handle_seq_layer (vc1parse, buffer, 0, size)) { + GST_ERROR_OBJECT (vc1parse, "Invalid sequence layer"); + return GST_FLOW_ERROR; + } + + frame->flags |= GST_BASE_PARSE_FRAME_FLAG_NO_FRAME; + + if (vc1parse->input_stream_format == VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME) { + gst_base_parse_set_min_frame_size (GST_BASE_PARSE (vc1parse), 4); + } else if (vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_RAW_FRAME) { + gst_base_parse_set_min_frame_size (GST_BASE_PARSE (vc1parse), 1); + } else { + /* frame-layer */ + gst_base_parse_set_min_frame_size (GST_BASE_PARSE (vc1parse), 8); + } + } else if (vc1parse->input_stream_format == VC1_STREAM_FORMAT_BDU || + vc1parse->input_stream_format == VC1_STREAM_FORMAT_BDU_FRAME || + (vc1parse->seq_layer_buffer + && (vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME))) { + GstVC1StartCode startcode; + + /* Is already a complete BDU, should have at least the startcode */ + g_assert (size >= 4); + startcode = data[3]; + + if (startcode != GST_VC1_SEQUENCE) { + if (!vc1parse->seq_hdr_buffer && !vc1parse->seq_layer_buffer) { + GST_ERROR_OBJECT (vc1parse, + "Need sequence header/layer before anything else"); + return GST_FLOW_ERROR; + } + } else if (startcode != GST_VC1_ENTRYPOINT + && vc1parse->profile == GST_VC1_PROFILE_ADVANCED) { + if (vc1parse->seq_hdr_buffer && !vc1parse->entrypoint_buffer) { + GST_ERROR_OBJECT (vc1parse, + "Need entrypoint header after the sequence header for the " + "advanced profile"); + return GST_FLOW_ERROR; + } + } + + if (!gst_vc1_parse_handle_bdu (vc1parse, startcode, buffer, 4, size - 4)) + return GST_FLOW_ERROR; + } else if (vc1parse->input_stream_format == 
VC1_STREAM_FORMAT_ASF || + (vc1parse->seq_layer_buffer + && vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_RAW_FRAME)) { + GstVC1FrameHdr header; + + GST_LOG_OBJECT (vc1parse, "Have new ASF or RAW data unit"); + + if (!vc1parse->seq_hdr_buffer && !vc1parse->seq_layer_buffer) { + GST_ERROR_OBJECT (vc1parse, "Need a sequence header or sequence layer"); + return GST_FLOW_ERROR; + } + + if (gst_vc1_parse_frame_header (data, size, &header, + &vc1parse->seq_hdr, NULL) == GST_VC1_PARSER_OK) { + if (header.ptype == GST_VC1_PICTURE_TYPE_B) + GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_B_FRAME); + } + + if (GST_CLOCK_TIME_IS_VALID (vc1parse->frame_duration)) + GST_BUFFER_DURATION (buffer) = vc1parse->frame_duration; + + /* Might be multiple BDUs here, complex... */ + if (vc1parse->profile == GST_VC1_PROFILE_ADVANCED) { + gboolean startcodes = FALSE; + + if (size >= 4) { + guint32 startcode = GST_READ_UINT32_BE (data); + startcodes = ((startcode & 0xffffff00) == 0x00000100); + } + + if (startcodes) { + if (!gst_vc1_parse_handle_bdus (vc1parse, buffer, 0, size)) + return GST_FLOW_ERROR; + + /* For the advanced profile we need a sequence header here */ + if (!vc1parse->seq_hdr_buffer) { + GST_ERROR_OBJECT (vc1parse, "Need sequence header"); + return GST_FLOW_ERROR; + } + } else { + /* Must be a frame or a frame + field */ + /* TODO: Check if keyframe */ + } + } + } else { + GstVC1ParserResult pres; + GstVC1FrameLayer flayer; + gboolean startcodes = FALSE; + + /* frame-layer or sequence-layer-frame-layer */ + + /* Check if the frame-layer data contains BDUs with startcodes. + * Startcodes are not allowed in raw WMV9/VC1 streams + */ + if (size >= 8 + 4) { + guint32 startcode = GST_READ_UINT32_BE (data + 8); + startcodes = ((startcode & 0xffffff00) == 0x00000100); + } + + /* We either need a sequence layer or sequence header here + * or this has to be an advanced profile stream. 
+ * + * For the advanced profile the frame-layer data contains + * BDUs with startcodes and includes the sequence header + */ + if (!vc1parse->seq_layer_buffer && !vc1parse->seq_hdr_buffer && !startcodes) { + GST_ERROR_OBJECT (vc1parse, "Need a sequence header or sequence layer"); + return GST_FLOW_ERROR; + } + + if ((vc1parse->seq_layer_buffer || vc1parse->seq_hdr_buffer) + && vc1parse->profile == GST_VC1_PROFILE_ADVANCED && !startcodes) { + GST_ERROR_OBJECT (vc1parse, + "Advanced profile frame-layer data must start with startcodes"); + return GST_FLOW_ERROR; + } + + memset (&flayer, 0, sizeof (flayer)); + pres = gst_vc1_parse_frame_layer (data, size, &flayer); + if (pres != GST_VC1_PARSER_OK) { + GST_ERROR_OBJECT (vc1parse, "Invalid VC1 frame layer"); + return GST_FLOW_ERROR; + } + + frame->buffer = buffer = gst_buffer_make_metadata_writable (buffer); + GST_BUFFER_TIMESTAMP (buffer) = + gst_util_uint64_scale (flayer.timestamp, GST_MSECOND, 1); + if (!flayer.key) + GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); + else + GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); + + /* For the simple/main profile this contains a single frame BDU without + * startcodes and for the advanced profile this contains BDUs with + * startcodes. In the case of the advanced profile parse them. + * + * Also for wrongly muxed simple/main profile streams with startcodes + * we do the same. + */ + if (startcodes) { + /* skip frame layer header */ + if (!gst_vc1_parse_handle_bdus (vc1parse, buffer, 8, size - 8)) + return GST_FLOW_ERROR; + + /* For the advanced profile we need a sequence header here */ + if (!vc1parse->seq_hdr_buffer) { + GST_ERROR_OBJECT (vc1parse, "Need sequence header"); + return GST_FLOW_ERROR; + } + } + } + + /* Need sequence header or sequence layer here, above code + * checks this already */ + g_assert (vc1parse->seq_layer_buffer || vc1parse->seq_hdr_buffer); + /* We need the entrypoint BDU for the advanced profile before we can set + * the caps. 
For the ASF header format it will already be in the codec_data, + * for the frame-layer stream format it will be in the first frame already. + * + * The only case where we wait another frame is the raw stream format, where + * it will be the second BDU + */ + if (vc1parse->profile == GST_VC1_PROFILE_ADVANCED + && !vc1parse->entrypoint_buffer) { + if (vc1parse->input_stream_format == VC1_STREAM_FORMAT_BDU + || vc1parse->input_stream_format == VC1_STREAM_FORMAT_BDU_FRAME + || vc1parse->input_stream_format == VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME) { + return GST_BASE_PARSE_FLOW_QUEUED; + } else { + GST_ERROR_OBJECT (vc1parse, "Need entrypoint for the advanced profile"); + return GST_FLOW_ERROR; + } + } + + if (!gst_vc1_parse_update_caps (vc1parse)) + return GST_FLOW_NOT_NEGOTIATED; + + return GST_FLOW_OK; +} + +static GstFlowReturn +gst_vc1_parse_pre_push_frame (GstBaseParse * parse, GstBaseParseFrame * frame) +{ + GstVC1Parse *vc1parse = GST_VC1_PARSE (parse); + + if (vc1parse->input_header_format != vc1parse->output_header_format || + vc1parse->input_stream_format != vc1parse->output_stream_format) { + GST_WARNING_OBJECT (vc1parse, "stream conversion not implemented yet"); + return GST_FLOW_ERROR; + } + + return GST_FLOW_OK; +} + +/* SMPTE 421M Table 7 */ +static const struct +{ + gint par_n, par_d; +} aspect_ratios[] = { + { + 0, 0}, { + 1, 1}, { + 12, 11}, { + 10, 11}, { + 16, 11}, { + 40, 33}, { + 24, 11}, { + 20, 11}, { + 32, 11}, { + 80, 33}, { + 18, 11}, { + 15, 11}, { + 64, 33}, { + 160, 99}, { + 0, 0}, { + 0, 0} +}; + +/* SMPTE 421M Table 8 */ +static const guint framerates_n[] = { + 0, + 24 * 1000, + 25 * 1000, 30 * 1000, 50 * 1000, 60 * 1000, 48 * 1000, 72 * 1000 +}; + +/* SMPTE 421M Table 9 */ +static const guint framerates_d[] = { + 0, + 1000, + 1001 +}; + +static gboolean +gst_vc1_parse_handle_seq_hdr (GstVC1Parse * vc1parse, + GstBuffer * buf, guint offset, guint size) +{ 
+ GstVC1ParserResult pres; + GstVC1Profile profile; + g_assert (GST_BUFFER_SIZE (buf) >= offset + size); + gst_buffer_replace (&vc1parse->seq_hdr_buffer, NULL); + memset (&vc1parse->seq_hdr, 0, sizeof (vc1parse->seq_hdr)); + pres = + gst_vc1_parse_sequence_header (GST_BUFFER_DATA (buf) + offset, + size, &vc1parse->seq_hdr); + if (pres != GST_VC1_PARSER_OK) { + GST_ERROR_OBJECT (vc1parse, "Invalid VC1 sequence header"); + return FALSE; + } + vc1parse->seq_hdr_buffer = gst_buffer_create_sub (buf, offset, size); + profile = vc1parse->seq_hdr.profile; + if (vc1parse->profile != profile) { + vc1parse->update_caps = TRUE; + vc1parse->profile = vc1parse->seq_hdr.profile; + } + + /* Only update fps if not from caps */ + if (!vc1parse->fps_from_caps && profile != GST_VC1_PROFILE_ADVANCED) { + gint fps; + /* This is only an estimate but better than nothing */ + fps = vc1parse->seq_hdr.struct_c.framerate; + if (fps != 0 && (vc1parse->fps_d == 0 || + gst_util_fraction_compare (fps, 1, vc1parse->fps_n, + vc1parse->fps_d) != 0)) { + vc1parse->update_caps = TRUE; + vc1parse->fps_n = fps; + vc1parse->fps_d = 1; + } + } + + if (profile == GST_VC1_PROFILE_ADVANCED) { + GstVC1Level level; + gint width, height; + level = vc1parse->seq_hdr.advanced.level; + if (vc1parse->level != level) { + vc1parse->update_caps = TRUE; + vc1parse->level = level; + } + + width = vc1parse->seq_hdr.advanced.max_coded_width; + height = vc1parse->seq_hdr.advanced.max_coded_height; + if (vc1parse->width != width || vc1parse->height != height) { + vc1parse->update_caps = TRUE; + vc1parse->width = width; + vc1parse->height = height; + } + + /* Only update fps if not from caps */ + if (!vc1parse->fps_from_caps) { + gint fps; + /* This is only an estimate but better than nothing */ + fps = vc1parse->seq_hdr.advanced.framerate; + if (fps != 0 && (vc1parse->fps_d == 0 || + gst_util_fraction_compare (fps, 1, vc1parse->fps_n, + vc1parse->fps_d) != 0)) { + vc1parse->update_caps = TRUE; + vc1parse->fps_n = fps; + 
vc1parse->fps_d = 1; + } + } + + if (vc1parse->seq_hdr.advanced.display_ext) { + /* Only update PAR if not from input caps */ + if (!vc1parse->par_from_caps + && vc1parse->seq_hdr.advanced.aspect_ratio_flag) { + gint par_n, par_d; + if (vc1parse->seq_hdr.advanced.aspect_ratio == 15) { + par_n = vc1parse->seq_hdr.advanced.aspect_horiz_size; + par_d = vc1parse->seq_hdr.advanced.aspect_vert_size; + } else { + par_n = aspect_ratios[vc1parse->seq_hdr.advanced.aspect_ratio].par_n; + par_d = aspect_ratios[vc1parse->seq_hdr.advanced.aspect_ratio].par_d; + } + + if (par_n != 0 && par_d != 0 && + (vc1parse->par_d == 0 + || gst_util_fraction_compare (par_n, par_d, + vc1parse->par_n, vc1parse->par_d) != 0)) { + vc1parse->update_caps = TRUE; + vc1parse->par_n = par_n; + vc1parse->par_d = par_d; + } + } + + /* Only update fps if not from caps, better value than above */ + if (!vc1parse->fps_from_caps && vc1parse->seq_hdr.advanced.framerate_flag) { + gint fps_n = 0, fps_d = 0; + if (!vc1parse->seq_hdr.advanced.framerateind) { + if (vc1parse->seq_hdr.advanced.frameratenr > 0 + && vc1parse->seq_hdr.advanced.frameratenr < 8 + && vc1parse->seq_hdr.advanced.frameratedr > 0 + && vc1parse->seq_hdr.advanced.frameratedr < 3) { + fps_n = framerates_n[vc1parse->seq_hdr.advanced.frameratenr]; + fps_d = framerates_d[vc1parse->seq_hdr.advanced.frameratedr]; + } + } else { + fps_n = vc1parse->seq_hdr.advanced.framerateexp + 1; + fps_d = 32; + } + + if (fps_n != 0 && fps_d != 0 && + (vc1parse->fps_d == 0 + || gst_util_fraction_compare (fps_n, fps_d, + vc1parse->fps_n, vc1parse->fps_d) != 0)) { + vc1parse->update_caps = TRUE; + vc1parse->fps_n = fps_n; + vc1parse->fps_d = fps_d; + } + } + } + } + + return TRUE; +} + +static gboolean +gst_vc1_parse_handle_seq_layer (GstVC1Parse * vc1parse, + GstBuffer * buf, guint offset, guint size) +{ + GstVC1ParserResult pres; + GstVC1Profile profile; + GstVC1Level level; + gint width, height; + g_assert (GST_BUFFER_SIZE (buf) >= offset + size); + 
gst_buffer_replace (&vc1parse->seq_layer_buffer, NULL); + memset (&vc1parse->seq_layer, 0, sizeof (vc1parse->seq_layer)); + pres = + gst_vc1_parse_sequence_layer (GST_BUFFER_DATA (buf) + offset, + size, &vc1parse->seq_layer); + if (pres != GST_VC1_PARSER_OK) { + GST_ERROR_OBJECT (vc1parse, "Invalid VC1 sequence layer"); + return FALSE; + } + vc1parse->seq_layer_buffer = gst_buffer_create_sub (buf, offset, size); + profile = vc1parse->seq_layer.struct_c.profile; + if (vc1parse->profile != profile) { + vc1parse->update_caps = TRUE; + vc1parse->profile = vc1parse->seq_layer.struct_c.profile; + } + + width = vc1parse->seq_layer.struct_a.horiz_size; + height = vc1parse->seq_layer.struct_a.vert_size; + if (vc1parse->width != width || vc1parse->height != height) { + vc1parse->update_caps = TRUE; + vc1parse->width = width; + vc1parse->height = height; + } + + level = vc1parse->seq_layer.struct_b.level; + if (vc1parse->level != level) { + vc1parse->update_caps = TRUE; + vc1parse->level = level; + } + + if (!vc1parse->fps_from_caps) { + gint fps; + fps = vc1parse->seq_layer.struct_c.framerate; + if (fps == 0 || fps == -1) + fps = vc1parse->seq_layer.struct_b.framerate; + if (fps != 0 && fps != -1 && (vc1parse->fps_d == 0 || + gst_util_fraction_compare (fps, 1, vc1parse->fps_n, + vc1parse->fps_d) != 0)) { + vc1parse->update_caps = TRUE; + vc1parse->fps_n = fps; + vc1parse->fps_d = 1; + } + } + + /* And now update the duration */ + if (vc1parse->seq_layer.numframes != 0 && vc1parse->seq_layer.numframes != -1) + gst_base_parse_set_duration (GST_BASE_PARSE (vc1parse), + GST_FORMAT_DEFAULT, vc1parse->seq_layer.numframes, 50); + return TRUE; +} + +static gboolean +gst_vc1_parse_handle_entrypoint (GstVC1Parse * vc1parse, + GstBuffer * buf, guint offset, guint size) +{ + g_assert (GST_BUFFER_SIZE (buf) >= offset + size); + gst_buffer_replace (&vc1parse->entrypoint_buffer, NULL); + vc1parse->entrypoint_buffer = gst_buffer_create_sub (buf, offset, size); + return TRUE; +} + +static 
void +gst_vc1_parse_update_stream_format_properties (GstVC1Parse * vc1parse) +{ + if (vc1parse->input_stream_format == VC1_STREAM_FORMAT_BDU + || vc1parse->input_stream_format == VC1_STREAM_FORMAT_BDU_FRAME) { + /* Need at least the 4 bytes start code */ + gst_base_parse_set_min_frame_size (GST_BASE_PARSE (vc1parse), 4); + gst_base_parse_set_syncable (GST_BASE_PARSE (vc1parse), TRUE); + } else + if (vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME) { + /* Need at least the 36 bytes sequence layer */ + gst_base_parse_set_min_frame_size (GST_BASE_PARSE (vc1parse), 36); + gst_base_parse_set_syncable (GST_BASE_PARSE (vc1parse), TRUE); + } else + if (vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_RAW_FRAME + || vc1parse->input_stream_format == + VC1_STREAM_FORMAT_SEQUENCE_LAYER_FRAME_LAYER) { + /* Need at least the 36 bytes sequence layer */ + gst_base_parse_set_min_frame_size (GST_BASE_PARSE (vc1parse), 36); + gst_base_parse_set_syncable (GST_BASE_PARSE (vc1parse), FALSE); + } else if (vc1parse->input_stream_format == VC1_STREAM_FORMAT_ASF) { + vc1parse->input_stream_format = VC1_STREAM_FORMAT_ASF; + /* Need something, assume this is already packetized */ + gst_base_parse_set_min_frame_size (GST_BASE_PARSE (vc1parse), 1); + gst_base_parse_set_syncable (GST_BASE_PARSE (vc1parse), FALSE); + } else if (vc1parse->input_stream_format == VC1_STREAM_FORMAT_FRAME_LAYER) { + /* Need at least the frame layer header */ + gst_base_parse_set_min_frame_size (GST_BASE_PARSE (vc1parse), 8); + gst_base_parse_set_syncable (GST_BASE_PARSE (vc1parse), FALSE); + } else { + g_assert_not_reached (); + } +} + +static gboolean +gst_vc1_parse_set_caps (GstBaseParse * parse, GstCaps * caps) +{ + GstVC1Parse *vc1parse = GST_VC1_PARSE (parse); + GstStructure *s; + const GValue *value; + GstBuffer *codec_data = NULL; + const gchar *stream_format = NULL; + const gchar 
*header_format = NULL; + const gchar *profile = NULL; + GST_DEBUG_OBJECT (parse, "caps %" GST_PTR_FORMAT, caps); + /* Parse the caps to get as much information as possible */ + s = gst_caps_get_structure (caps, 0); + vc1parse->width = 0; + gst_structure_get_int (s, "width", &vc1parse->width); + vc1parse->height = 0; + gst_structure_get_int (s, "height", &vc1parse->height); + vc1parse->fps_n = vc1parse->fps_d = 0; + vc1parse->fps_from_caps = FALSE; + gst_structure_get_fraction (s, "framerate", &vc1parse->fps_n, + &vc1parse->fps_d); + if (vc1parse->fps_d != 0) + vc1parse->fps_from_caps = TRUE; + gst_structure_get_fraction (s, "pixel-aspect-ratio", + &vc1parse->par_n, &vc1parse->par_d); + if (vc1parse->par_n != 0 && vc1parse->par_d != 0) + vc1parse->par_from_caps = TRUE; + vc1parse->fourcc = 0; + gst_structure_get_fourcc (s, "format", &vc1parse->fourcc); + vc1parse->profile = -1; + profile = gst_structure_get_string (s, "profile"); + if (profile && strcmp (profile, "simple") == 0) + vc1parse->profile = GST_VC1_PROFILE_SIMPLE; + else if (profile && strcmp (profile, "main") == 0) + vc1parse->profile = GST_VC1_PROFILE_MAIN; + else if (profile && strcmp (profile, "advanced") == 0) + vc1parse->profile = GST_VC1_PROFILE_ADVANCED; + else if (vc1parse->fourcc == GST_MAKE_FOURCC ('W', 'V', 'C', '1')) + vc1parse->profile = GST_VC1_PROFILE_ADVANCED; + else if (vc1parse->fourcc == GST_MAKE_FOURCC ('W', 'M', 'V', '3')) + vc1parse->profile = GST_VC1_PROFILE_MAIN; /* or SIMPLE */ + vc1parse->level = -1; + vc1parse->detecting_stream_format = FALSE; + header_format = gst_structure_get_string (s, "header-format"); + stream_format = gst_structure_get_string (s, "stream-format"); + /* Now parse the codec_data */ + gst_buffer_replace (&vc1parse->seq_layer_buffer, NULL); + gst_buffer_replace (&vc1parse->seq_hdr_buffer, NULL); + gst_buffer_replace (&vc1parse->entrypoint_buffer, NULL); + memset (&vc1parse->seq_layer, 0, sizeof (vc1parse->seq_layer)); + memset (&vc1parse->seq_hdr, 0, sizeof 
(vc1parse->seq_hdr)); + value = gst_structure_get_value (s, "codec_data"); + if (value != NULL) { + codec_data = gst_value_get_buffer (value); + if ((GST_BUFFER_SIZE (codec_data) == 4 || + GST_BUFFER_SIZE (codec_data) == 5)) { + /* ASF, VC1/WMV3 simple/main profile + * This is the sequence header without start codes + */ + if (!gst_vc1_parse_handle_seq_hdr (vc1parse, codec_data, + 0, GST_BUFFER_SIZE (codec_data))) + return FALSE; + if (header_format && strcmp (header_format, "asf") != 0) + GST_WARNING_OBJECT (vc1parse, + "Upstream claimed '%s' header format but 'asf' detected", + header_format); + vc1parse->input_header_format = VC1_HEADER_FORMAT_ASF; + } else + if (GST_BUFFER_SIZE (codec_data) == 36 && + GST_BUFFER_DATA (codec_data)[3] == 0xc5) { + /* Sequence Layer, SMPTE S421M-2006 Annex L.3 */ + if (!gst_vc1_parse_handle_seq_layer (vc1parse, codec_data, 0, + GST_BUFFER_SIZE (codec_data))) { + GST_ERROR_OBJECT (vc1parse, "Invalid VC1 sequence layer"); + return FALSE; + } + + if (header_format && strcmp (header_format, "sequence-layer") != 0) + GST_WARNING_OBJECT (vc1parse, + "Upstream claimed '%s' header format but 'sequence-layer' detected", + header_format); + vc1parse->input_header_format = VC1_HEADER_FORMAT_SEQUENCE_LAYER; + } else { + guint32 start_code; + /* ASF, VC1 advanced profile + * This should be the + * 1) ASF binding byte + * 2) Sequence Header with startcode + * 3) EntryPoint Header with startcode + */ + if (GST_BUFFER_SIZE (codec_data) < 1 + 4 + 4 + 4 + 2) { + GST_ERROR_OBJECT (vc1parse, + "Too small for VC1 advanced profile ASF header"); + return FALSE; + } + + /* Some sanity checking */ + if ((GST_BUFFER_DATA (codec_data)[0] & 0x01) != 0x01) { + GST_ERROR_OBJECT (vc1parse, + "Invalid binding byte for VC1 advanced profile ASF header"); + return FALSE; + } + + start_code = GST_READ_UINT32_BE (GST_BUFFER_DATA (codec_data) + 1); + if (start_code != 0x000010f) { + GST_ERROR_OBJECT (vc1parse, + "VC1 advanced profile ASF header does not start with 
SequenceHeader startcode"); + return FALSE; + } + + if (!gst_vc1_parse_handle_bdus (vc1parse, codec_data, 1, + GST_BUFFER_SIZE (codec_data) - 1)) + return FALSE; + + if (!vc1parse->seq_hdr_buffer || !vc1parse->entrypoint_buffer) { + GST_ERROR_OBJECT (vc1parse, + "Need sequence header and entrypoint header in the codec_data"); + return FALSE; + } + + if (header_format && strcmp (header_format, "asf") != 0) + GST_WARNING_OBJECT (vc1parse, + "Upstream claimed '%s' header format but 'asf' detected", + header_format); + + vc1parse->input_header_format = VC1_HEADER_FORMAT_ASF; + } + } else { + vc1parse->input_header_format = VC1_HEADER_FORMAT_NONE; + if (header_format && strcmp (header_format, "sequence-layer") == 0) + vc1parse->input_header_format = VC1_HEADER_FORMAT_SEQUENCE_LAYER; + else if (header_format && strcmp (header_format, "none") != 0) + GST_WARNING_OBJECT (vc1parse, + "Upstream claimed '%s' header format but 'none' detected", + header_format); + } + + /* If no stream-format was set we try to detect it */ + if (!stream_format) { + vc1parse->detecting_stream_format = TRUE; + } else { + vc1parse->input_stream_format = stream_format_from_string (stream_format); + gst_vc1_parse_update_stream_format_properties (vc1parse); + } + + vc1parse->renegotiate = TRUE; + vc1parse->update_caps = TRUE; + return TRUE; +} diff --git a/gst/videoparsers/gstvc1parse.h b/gst/videoparsers/gstvc1parse.h new file mode 100644 index 0000000..c1abe6e --- /dev/null +++ b/gst/videoparsers/gstvc1parse.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2011, Hewlett-Packard Development Company, L.P. + * Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>, Collabora Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation + * version 2.1 of the License. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __GST_VC1_PARSE_H__ +#define __GST_VC1_PARSE_H__ + +#include <gst/gst.h> +#include <gst/base/gstbaseparse.h> +#include <gst/codecparsers/gstvc1parser.h> + +G_BEGIN_DECLS + +#define GST_TYPE_VC1_PARSE \ + (gst_vc1_parse_get_type()) +#define GST_VC1_PARSE(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_VC1_PARSE,GstVC1Parse)) +#define GST_VC1_PARSE_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_VC1_PARSE,GstVC1ParseClass)) +#define GST_IS_VC1_PARSE(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_VC1_PARSE)) +#define GST_IS_VC1_PARSE_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_VC1_PARSE)) + +typedef enum { + VC1_HEADER_FORMAT_NONE = 0, + VC1_HEADER_FORMAT_ASF, + VC1_HEADER_FORMAT_SEQUENCE_LAYER +} VC1HeaderFormat; + +typedef enum { + VC1_STREAM_FORMAT_BDU = 0, + VC1_STREAM_FORMAT_BDU_FRAME, + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU, + VC1_STREAM_FORMAT_SEQUENCE_LAYER_BDU_FRAME, + VC1_STREAM_FORMAT_SEQUENCE_LAYER_RAW_FRAME, + VC1_STREAM_FORMAT_SEQUENCE_LAYER_FRAME_LAYER, + VC1_STREAM_FORMAT_ASF, + VC1_STREAM_FORMAT_FRAME_LAYER +} VC1StreamFormat; + +/* FIXME move into baseparse, or anything equivalent; + * see https://bugzilla.gnome.org/show_bug.cgi?id=650093 */ +#define GST_BASE_PARSE_FRAME_FLAG_PARSING 0x10000 + +typedef struct _GstVC1Parse GstVC1Parse; +typedef struct _GstVC1ParseClass GstVC1ParseClass; + +struct _GstVC1Parse +{ + GstBaseParse baseparse; + + /* Caps */ + GstVC1Profile profile; + GstVC1Level level; + guint32 fourcc; + gint 
width, height; + + gint fps_n, fps_d; + gboolean fps_from_caps; + GstClockTime frame_duration; + gint par_n, par_d; + gboolean par_from_caps; + + /* TRUE if we should negotiate with downstream */ + gboolean renegotiate; + /* TRUE if the srcpads should be updated */ + gboolean update_caps; + + VC1HeaderFormat input_header_format; + VC1HeaderFormat output_header_format; + VC1StreamFormat input_stream_format; + VC1StreamFormat output_stream_format; + gboolean detecting_stream_format; + + GstVC1SeqHdr seq_hdr; + GstBuffer *seq_hdr_buffer; + GstBuffer *entrypoint_buffer; + + GstVC1SeqLayer seq_layer; + GstBuffer *seq_layer_buffer; + + /* Metadata about the currently parsed frame, only + * valid if the GstBaseParseFrame has the + * GST_BASE_PARSE_FRAME_FLAG_PARSING flag */ + GstVC1StartCode startcode; +}; + +struct _GstVC1ParseClass +{ + GstBaseParseClass parent_class; +}; + +G_END_DECLS + +GType gst_vc1_parse_get_type (void); + +#endif /* __GST_VC1_PARSE_H__ */ diff --git a/gst/videoparsers/h264parse.c b/gst/videoparsers/h264parse.c index 0fac34e..a556d44 100644 --- a/gst/videoparsers/h264parse.c +++ b/gst/videoparsers/h264parse.c @@ -148,16 +148,14 @@ gst_nal_bs_read_se (GstNalBs * bs) /* end parser helper */ static void -gst_h264_params_store_nal (GstH264Params * params, GstBuffer ** store, gint id, - GstNalBs * bs) +gst_h264_params_store_nal (GstH264Params * params, GstBuffer ** store, + gint store_size, gint id, GstNalBs * bs) { const guint8 *data; GstBuffer *buf; guint size; - g_return_if_fail (MAX_SPS_COUNT == MAX_PPS_COUNT); - - if (id >= MAX_SPS_COUNT) { + if (id >= store_size) { GST_DEBUG_OBJECT (params->el, "unable to store nal, id out-of-range %d", id); return; @@ -207,12 +205,6 @@ gst_h264_params_get_pps (GstH264Params * params, guint8 pps_id, gboolean set) g_return_val_if_fail (params != NULL, NULL); - if (G_UNLIKELY (pps_id >= MAX_PPS_COUNT)) { - GST_WARNING_OBJECT (params->el, - "requested pps_id=%04x out of range", pps_id); - return NULL; - } - pps = 
¶ms->pps_buffers[pps_id]; if (set) { if (pps->valid) { @@ -395,7 +387,8 @@ gst_h264_params_decode_sps (GstH264Params * params, GstNalBs * bs) if (G_UNLIKELY (sps == NULL)) return FALSE; - gst_h264_params_store_nal (params, params->sps_nals, sps_id, bs); + gst_h264_params_store_nal (params, params->sps_nals, MAX_SPS_COUNT, sps_id, + bs); /* could be redefined mid stream, arrange for clear state */ memset (sps, 0, sizeof (*sps)); @@ -557,10 +550,16 @@ gst_h264_params_decode_sps (GstH264Params * params, GstNalBs * bs) static gboolean gst_h264_params_decode_pps (GstH264Params * params, GstNalBs * bs) { - guint8 pps_id; + gint pps_id; GstH264ParamsPPS *pps = NULL; pps_id = gst_nal_bs_read_ue (bs); + if (G_UNLIKELY (pps_id >= MAX_PPS_COUNT)) { + GST_WARNING_OBJECT (params->el, + "requested pps_id=%04x out of range", pps_id); + return FALSE; + } + pps = gst_h264_params_get_pps (params, pps_id, FALSE); if (G_UNLIKELY (pps == NULL)) @@ -570,7 +569,8 @@ gst_h264_params_decode_pps (GstH264Params * params, GstNalBs * bs) pps->valid = TRUE; params->pps = pps; - gst_h264_params_store_nal (params, params->pps_nals, pps_id, bs); + gst_h264_params_store_nal (params, params->pps_nals, MAX_PPS_COUNT, pps_id, + bs); pps->sps_id = gst_nal_bs_read_ue (bs); GST_LOG_OBJECT (params->el, "pps %d referencing sps %d", pps_id, pps->sps_id); diff --git a/gst/videoparsers/h264parse.h b/gst/videoparsers/h264parse.h index 141564b..517c854 100644 --- a/gst/videoparsers/h264parse.h +++ b/gst/videoparsers/h264parse.h @@ -74,7 +74,7 @@ typedef struct _GstH264ParamsSPS GstH264ParamsSPS; typedef struct _GstH264ParamsPPS GstH264ParamsPPS; #define MAX_SPS_COUNT 32 -#define MAX_PPS_COUNT 32 +#define MAX_PPS_COUNT 256 /* SPS: sequential parameter sets */ struct _GstH264ParamsSPS diff --git a/gst/videoparsers/mpegvideoparse.c b/gst/videoparsers/mpegvideoparse.c new file mode 100644 index 0000000..5a90c2b --- /dev/null +++ b/gst/videoparsers/mpegvideoparse.c @@ -0,0 +1,285 @@ +/* GStreamer + * Copyright (C) 
<2007> Jan Schmidt <thaytan@mad.scientist.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "mpegvideoparse.h" + +#include <string.h> +#include <gst/base/gstbitreader.h> + +GST_DEBUG_CATEGORY_EXTERN (mpegv_parse_debug); +#define GST_CAT_DEFAULT mpegv_parse_debug + + +#define GET_BITS(b, num, bits) G_STMT_START { \ + if (!gst_bit_reader_get_bits_uint32(b, bits, num)) \ + goto failed; \ + GST_TRACE ("parsed %d bits: %d", num, *(bits)); \ +} G_STMT_END + +#define MARKER_BIT(b) G_STMT_START { \ + guint32 i; \ + GET_BITS(b, 1, &i); \ + if (i != 0x1) \ + goto failed; \ +} G_STMT_END + +static inline gboolean +find_start_code (GstBitReader * b) +{ + guint32 bits; + + /* 0 bits until byte aligned */ + while (b->bit != 0) { + GET_BITS (b, 1, &bits); + } + + /* 0 bytes until startcode */ + while (gst_bit_reader_peek_bits_uint32 (b, &bits, 32)) { + if (bits >> 8 == 0x1) { + return TRUE; + } else { + gst_bit_reader_skip (b, 8); + } + } + + return FALSE; + +failed: + return FALSE; +} + +static gboolean +gst_mpeg_video_params_parse_extension (MPEGVParams * params, GstBitReader * br) +{ + guint32 bits; + + /* double-check */ + GET_BITS (br, 32, &bits); + if (bits != 0x100 + MPEG_PACKET_EXTENSION) + goto failed; 
+ + /* extension_start_code identifier */ + GET_BITS (br, 4, &bits); + if (bits != 0x1) { + /* not sequence_extension_id.. our caller got fooled by it's + * limited understanding of mpeg1/2 syntax.. let's just pretend + * and caller will skip fwd to next start code. + * + * Note: see http://www.omegacs.net/misc/glamdring/misc/mpeg2.parse + * with mpeg2 from DVD, I see us hit this a second time with + * sequence_display_extension_id (0x2) + */ + return TRUE; + } + + /* profile_and_level_indication */ + GET_BITS (br, 4, &bits); + params->profile = bits; + GET_BITS (br, 4, &bits); + params->level = bits; + + /* progressive_sequence */ + GET_BITS (br, 1, &bits); + params->progressive = bits; + + /* chroma_format */ + GET_BITS (br, 2, &bits); + + /* horizontal_size_extension */ + GET_BITS (br, 2, &bits); + params->width += (bits << 12); + /* vertical_size_extension */ + GET_BITS (br, 2, &bits); + params->height += (bits << 12); + + /* bit_rate_extension */ + GET_BITS (br, 12, &bits); + if (params->bitrate) + params->bitrate += (bits << 18) * 400; + /* marker_bit */ + MARKER_BIT (br); + /* vbv_buffer_size_extension */ + GET_BITS (br, 8, &bits); + /* low_delay */ + GET_BITS (br, 1, &bits); + + /* frame_rate_extension_n */ + GET_BITS (br, 2, &bits); + params->fps_n *= bits + 1; + /* frame_rate_extension_d */ + GET_BITS (br, 5, &bits); + params->fps_d *= bits + 1; + + return TRUE; + + /* ERRORS */ +failed: + { + GST_WARNING ("Failed to parse sequence extension"); + return FALSE; + } +} + +/* Set the Pixel Aspect Ratio in our hdr from a DAR code in the data */ +static void +set_par_from_dar (MPEGVParams * params, guint8 asr_code) +{ + /* Pixel_width = DAR_width * display_vertical_size */ + /* Pixel_height = DAR_height * display_horizontal_size */ + switch (asr_code) { + case 0x02: /* 3:4 DAR = 4:3 pixels */ + params->par_w = 4 * params->height; + params->par_h = 3 * params->width; + break; + case 0x03: /* 9:16 DAR */ + params->par_w = 16 * params->height; + params->par_h = 9 
* params->width; + break; + case 0x04: /* 1:2.21 DAR */ + params->par_w = 221 * params->height; + params->par_h = 100 * params->width; + break; + case 0x01: /* Square pixels */ + params->par_w = params->par_h = 1; + break; + default: + GST_DEBUG ("unknown/invalid aspect_ratio_information %d", asr_code); + break; + } +} + +static void +set_fps_from_code (MPEGVParams * params, guint8 fps_code) +{ + const gint framerates[][2] = { + {30, 1}, {24000, 1001}, {24, 1}, {25, 1}, + {30000, 1001}, {30, 1}, {50, 1}, {60000, 1001}, + {60, 1}, {30, 1} + }; + + if (fps_code && fps_code < 10) { + params->fps_n = framerates[fps_code][0]; + params->fps_d = framerates[fps_code][1]; + } else { + GST_DEBUG ("unknown/invalid frame_rate_code %d", fps_code); + /* Force a valid framerate */ + /* FIXME or should this be kept unknown ?? */ + params->fps_n = 30000; + params->fps_d = 1001; + } +} + +static gboolean +gst_mpeg_video_params_parse_sequence (MPEGVParams * params, GstBitReader * br) +{ + guint32 bits; + + GET_BITS (br, 32, &bits); + if (bits != 0x100 + MPEG_PACKET_SEQUENCE) + goto failed; + + /* assume MPEG-1 till otherwise discovered */ + params->mpeg_version = 1; + + GET_BITS (br, 12, &bits); + params->width = bits; + GET_BITS (br, 12, &bits); + params->height = bits; + + GET_BITS (br, 4, &bits); + set_par_from_dar (params, bits); + GET_BITS (br, 4, &bits); + set_fps_from_code (params, bits); + + GET_BITS (br, 18, &bits); + if (bits == 0x3ffff) { + /* VBR stream */ + params->bitrate = 0; + } else { + /* Value in header is in units of 400 bps */ + params->bitrate *= 400; + } + + /* skip 1 + VBV buffer size */ + if (!gst_bit_reader_skip (br, 11)) + goto failed; + + /* constrained_parameters_flag */ + GET_BITS (br, 1, &bits); + + /* load_intra_quantiser_matrix */ + GET_BITS (br, 1, &bits); + if (bits) { + if (!gst_bit_reader_skip (br, 8 * 64)) + goto failed; + } + + /* load_non_intra_quantiser_matrix */ + GET_BITS (br, 1, &bits); + if (bits) { + if (!gst_bit_reader_skip (br, 8 * 64)) 
+ goto failed; + } + + /* check for MPEG-2 sequence extension */ + while (find_start_code (br)) { + gst_bit_reader_peek_bits_uint32 (br, &bits, 32); + if (bits == 0x100 + MPEG_PACKET_EXTENSION) { + if (!gst_mpeg_video_params_parse_extension (params, br)) + goto failed; + params->mpeg_version = 2; + } + } + + /* dump some info */ + GST_LOG ("width x height: %d x %d", params->width, params->height); + GST_LOG ("fps: %d/%d", params->fps_n, params->fps_d); + GST_LOG ("par: %d/%d", params->par_w, params->par_h); + GST_LOG ("profile/level: %d/%d", params->profile, params->level); + GST_LOG ("bitrate/progressive: %d/%d", params->bitrate, params->progressive); + + return TRUE; + + /* ERRORS */ +failed: + { + GST_WARNING ("Failed to parse sequence header"); + /* clear out stuff */ + memset (params, 0, sizeof (*params)); + return FALSE; + } +} + +gboolean +gst_mpeg_video_params_parse_config (MPEGVParams * params, const guint8 * data, + guint size) +{ + GstBitReader br; + + if (size < 4) + return FALSE; + + gst_bit_reader_init (&br, data, size); + + return gst_mpeg_video_params_parse_sequence (params, &br); +} diff --git a/gst/videoparsers/mpegvideoparse.h b/gst/videoparsers/mpegvideoparse.h new file mode 100644 index 0000000..f0092b7 --- /dev/null +++ b/gst/videoparsers/mpegvideoparse.h @@ -0,0 +1,77 @@ +/* GStreamer + * Copyright (C) <2007> Jan Schmidt <thaytan@mad.scientist.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. 
+ * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef __GST_MPEGVIDEO_PARAMS_H__ +#define __GST_MPEGVIDEO_PARAMS_H__ + +#include <gst/gst.h> + +G_BEGIN_DECLS + +/* Packet ID codes for different packet types we + * care about */ +#define MPEG_PACKET_PICTURE 0x00 +#define MPEG_PACKET_SLICE_MIN 0x01 +#define MPEG_PACKET_SLICE_MAX 0xaf +#define MPEG_PACKET_SEQUENCE 0xb3 +#define MPEG_PACKET_EXTENSION 0xb5 +#define MPEG_PACKET_SEQUENCE_END 0xb7 +#define MPEG_PACKET_GOP 0xb8 +#define MPEG_PACKET_NONE 0xff + +/* Extension codes we care about */ +#define MPEG_PACKET_EXT_SEQUENCE 0x01 +#define MPEG_PACKET_EXT_SEQUENCE_DISPLAY 0x02 +#define MPEG_PACKET_EXT_QUANT_MATRIX 0x03 + +/* Flags indicating what type of packets are in this block, some are mutually + * exclusive though - ie, sequence packs are accumulated separately. 
GOP & + * Picture may occur together or separately */ +#define MPEG_BLOCK_FLAG_SEQUENCE 0x01 +#define MPEG_BLOCK_FLAG_PICTURE 0x02 +#define MPEG_BLOCK_FLAG_GOP 0x04 + +#define MPEG_PICTURE_TYPE_I 0x01 +#define MPEG_PICTURE_TYPE_P 0x02 +#define MPEG_PICTURE_TYPE_B 0x03 +#define MPEG_PICTURE_TYPE_D 0x04 + +typedef struct _MPEGVParams MPEGVParams; + +struct _MPEGVParams +{ + gint mpeg_version; + + gint profile; + gint level; + + gint width, height; + gint par_w, par_h; + gint fps_n, fps_d; + + gint bitrate; + gboolean progressive; +}; + +GstFlowReturn gst_mpeg_video_params_parse_config (MPEGVParams * params, + const guint8 * data, guint size); + +G_END_DECLS + +#endif diff --git a/gst/videoparsers/plugin.c b/gst/videoparsers/plugin.c index cfd8a48..c7506da 100644 --- a/gst/videoparsers/plugin.c +++ b/gst/videoparsers/plugin.c @@ -25,6 +25,8 @@ #include "gsth263parse.h" #include "gsth264parse.h" #include "gstdiracparse.h" +#include "gstmpegvideoparse.h" +#include "gstvc1parse.h" static gboolean plugin_init (GstPlugin * plugin) @@ -34,9 +36,13 @@ plugin_init (GstPlugin * plugin) ret = gst_element_register (plugin, "h263parse", GST_RANK_NONE, GST_TYPE_H263_PARSE); ret = gst_element_register (plugin, "h264parse", - GST_RANK_NONE, GST_TYPE_H264_PARSE); + GST_RANK_PRIMARY + 1, GST_TYPE_H264_PARSE); ret = gst_element_register (plugin, "diracparse", GST_RANK_NONE, GST_TYPE_DIRAC_PARSE); + ret = gst_element_register (plugin, "mpegvideoparse", + GST_RANK_PRIMARY + 1, GST_TYPE_MPEGVIDEO_PARSE); + ret |= gst_element_register (plugin, "vc1parse", + GST_RANK_PRIMARY + 1, GST_TYPE_VC1_PARSE); return ret; } diff --git a/pkgconfig/gstreamer-plugins-bad-uninstalled.pc.in b/pkgconfig/gstreamer-plugins-bad-uninstalled.pc.in index 4b6d69d..6c9b950 100644 --- a/pkgconfig/gstreamer-plugins-bad-uninstalled.pc.in +++ b/pkgconfig/gstreamer-plugins-bad-uninstalled.pc.in @@ -1,6 +1,6 @@ prefix= exec_prefix= -libdir=${pcfiledir}/../gst-libs/gst/interfaces +libdir=${pcfiledir}/../gst-libs/gst 
includedir=${pcfiledir}/../gst-libs Name: GStreamer Bad Plugin libraries, Uninstalled @@ -8,6 +8,6 @@ Description: Currently includes the photography interface library, uninstalled Version: @VERSION@ Requires: gstreamer-@GST_MAJORMINOR@ gstreamer-base-@GST_MAJORMINOR@ -Libs: -L${libdir} ${libdir}/libgstphotography-@GST_MAJORMINOR@.la +Libs: -L${libdir}/video -L${libdir}/interfaces ${libdir}/libgstphotography-@GST_MAJORMINOR@.la Cflags: -I${includedir} diff --git a/sys/Makefile.am b/sys/Makefile.am index 8c161d0..d6f678f 100644 --- a/sys/Makefile.am +++ b/sys/Makefile.am @@ -95,13 +95,20 @@ else VDPAU_DIR= endif + if USE_SHM SHM_DIR=shm else SHM_DIR= endif -SUBDIRS = $(ACM_DIR) $(APPLE_MEDIA_DIR) $(DECKLINK_DIR) $(DIRECTDRAW_DIR) $(DIRECTSOUND_DIR) $(DVB_DIR) $(FBDEV_DIR) $(LINSYS_DIR) $(OSX_VIDEO_DIR) $(QT_DIR) $(SHM_DIR) $(VCD_DIR) $(VDPAU_DIR) $(WININET_DIR) +if USE_PVR +PVR_DIR=pvr2d +else +PVR_DIR= +endif + +SUBDIRS = $(ACM_DIR) $(APPLE_MEDIA_DIR) $(DECKLINK_DIR) $(DIRECTDRAW_DIR) $(DIRECTSOUND_DIR) $(DVB_DIR) $(FBDEV_DIR) $(LINSYS_DIR) $(OSX_VIDEO_DIR) $(QT_DIR) $(SHM_DIR) $(VCD_DIR) $(VDPAU_DIR) $(WININET_DIR) $(PVR_DIR) DIST_SUBDIRS = acmenc acmmp3dec applemedia decklink directdraw directsound dvb linsys fbdev dshowdecwrapper dshowsrcwrapper dshowvideosink \ osxvideo qtwrapper shm vcd vdpau wasapi wininet winks winscreencap diff --git a/sys/pvr2d/Makefile.am b/sys/pvr2d/Makefile.am new file mode 100644 index 0000000..b450710 --- /dev/null +++ b/sys/pvr2d/Makefile.am @@ -0,0 +1,22 @@ +plugin_LTLIBRARIES = libgstpvr.la + +libgstpvr_la_SOURCES = \ + gstpvr.c \ + gstpvrbufferpool.c \ + gstpvrvideosink.c + +libgstpvr_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_CFLAGS) $(PVR_CFLAGS) + +libgstpvr_la_LIBADD = $(GST_LIBS) $(GST_BASE_LIBS) \ + $(GST_PLUGINS_BASE_LIBS) $(X11_LIBS) -lgstvideo-$(GST_MAJORMINOR) \ + -lgstinterfaces-$(GST_MAJORMINOR) -lpvr2d -lpvrPVR2D_DRIWSEGL\ + $(PVR_LIBS) \ + $(LIBM) + +libgstpvr_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS) 
+libgstpvr_la_LIBTOOLFLAGS = --tag=disable-static + +noinst_HEADERS = \ + gstpvr.h \ + gstpvrbufferpool.h \ + gstpvrvideosink.h diff --git a/sys/pvr2d/gstpvr.c b/sys/pvr2d/gstpvr.c new file mode 100644 index 0000000..0112d1e --- /dev/null +++ b/sys/pvr2d/gstpvr.c @@ -0,0 +1,85 @@ +/* + * GStreamer + * Copyright (c) 2010, Texas Instruments Incorporated + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifdef HAVE_CONFIG_H +# include <config.h> +#endif + +#include "gstpvr.h" +#include "gstpvrvideosink.h" + +GST_DEBUG_CATEGORY (gst_debug_pvrvideosink); + +static gboolean +plugin_init (GstPlugin * plugin) +{ + GST_DEBUG_CATEGORY_INIT (gst_debug_pvrvideosink, "pvrvideosink", 0, + "pvrvideosink"); + + return gst_element_register (plugin, "pvrvideosink", GST_RANK_PRIMARY, + GST_TYPE_PVRVIDEOSINK); +} + +void * +gst_ducati_alloc_1d (gint sz) +{ + MemAllocBlock block = { + .pixelFormat = PIXEL_FMT_PAGE, + .dim.len = sz, + }; + return MemMgr_Alloc (&block, 1); +} + +void * +gst_ducati_alloc_2d (gint width, gint height, guint * sz) +{ + MemAllocBlock block[] = { { + .pixelFormat = PIXEL_FMT_8BIT, + .dim = {.area = { + .width = width, + .height = ALIGN2 (height, 1), + }}, + .stride = 4096}, { + .pixelFormat = PIXEL_FMT_16BIT, + .dim = {.area = { + .width = width, + .height = ALIGN2 (height, 1) / 2, + }}, + .stride = 4096} + 
}; + if (sz) { + *sz = (4096 * ALIGN2 (height, 1) * 3) / 2; + } + return MemMgr_Alloc (block, 2); +} + +/* PACKAGE: this is usually set by autotools depending on some _INIT macro + * in configure.ac and then written into and defined in config.h, but we can + * just set it ourselves here in case someone doesn't use autotools to + * compile this code. GST_PLUGIN_DEFINE needs PACKAGE to be defined. + */ +#ifndef PACKAGE +# define PACKAGE "ducati" +#endif + +GST_PLUGIN_DEFINE (GST_VERSION_MAJOR, + GST_VERSION_MINOR, + "pvr", + "Pvr2d based plugin", + plugin_init, VERSION, "LGPL", "GStreamer", "http://gstreamer.net/") diff --git a/sys/pvr2d/gstpvr.h b/sys/pvr2d/gstpvr.h new file mode 100644 index 0000000..d2c57af --- /dev/null +++ b/sys/pvr2d/gstpvr.h @@ -0,0 +1,42 @@ +/* + * GStreamer + * Copyright (c) 2010, Texas Instruments Incorporated + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __GST_DUCATI_H__ +#define __GST_DUCATI_H__ + +#include <stdint.h> +#include <string.h> + +#include <tiler.h> +#include <tilermem.h> +#include <memmgr.h> + +#include <gst/gst.h> + +G_BEGIN_DECLS + +/* align x to next highest multiple of 2^n */ +#define ALIGN2(x,n) (((x) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1)) + +void * gst_ducati_alloc_1d (gint sz); +void * gst_ducati_alloc_2d (gint width, gint height, guint * sz); + +G_END_DECLS + +#endif /* __GST_DUCATI_H__ */ diff --git a/sys/pvr2d/gstpvrbufferpool.c b/sys/pvr2d/gstpvrbufferpool.c new file mode 100644 index 0000000..f0eaa06 --- /dev/null +++ b/sys/pvr2d/gstpvrbufferpool.c @@ -0,0 +1,343 @@ +/* + * GStreamer + * Copyright (c) 2010, Texas Instruments Incorporated + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "gstpvrbufferpool.h" + +GST_DEBUG_CATEGORY_EXTERN (gst_debug_pvrvideosink); +#define GST_CAT_DEFAULT gst_debug_pvrvideosink + +/* + * GstDucatiBuffer + */ + +static GstBufferClass *buffer_parent_class; + +PVR2DMEMINFO * +gst_ducati_buffer_get_meminfo (GstDucatiBuffer * self) +{ + return self->src_mem; +} + + +static GstDucatiBuffer * +gst_ducati_buffer_new (GstPvrBufferPool * pool) +{ + PVR2DERROR pvr_error; + void *buf; + guint sz; + GstDucatiBuffer *self = (GstDucatiBuffer *) + gst_mini_object_new (GST_TYPE_DUCATIBUFFER); + + GST_LOG_OBJECT (pool->element, "creating buffer %p in pool %p", self, pool); + + self->pool = (GstPvrBufferPool *) + gst_mini_object_ref (GST_MINI_OBJECT (pool)); + + if (pool->strided) { + buf = gst_ducati_alloc_2d (pool->padded_width, pool->padded_height, &sz); + } else { + sz = pool->size; + buf = gst_ducati_alloc_1d (sz); + } + + GST_BUFFER_DATA (self) = buf; + GST_BUFFER_SIZE (self) = pool->size; + GST_LOG_OBJECT (pool->element, "width=%d, height=%d and size=%d", + pool->padded_width, pool->padded_height, pool->size); + + pvr_error = + PVR2DMemWrap (pool->pvr_context, GST_BUFFER_DATA (self), 0, pool->size, + NULL, &(self->src_mem)); + if (pvr_error != PVR2D_OK) { + GST_LOG_OBJECT (pool->element, "Failed to Wrap buffer memory" + "returned %d", pvr_error); + } else { + self->wrapped = TRUE; + } + + gst_buffer_set_caps (GST_BUFFER (self), pool->caps); + + return self; +} + +static GstDucatiBuffer * +gst_ducati_buffer_copy (GstDucatiBuffer * self) +{ + GstPvrBufferPool *pool = self->pool; + GstDucatiBuffer *copy; + + g_return_val_if_fail (self != NULL, NULL); + + GST_LOG_OBJECT (pool->element, "copy buffer %p", self); + + copy = gst_pvr_bufferpool_get (pool); + + if (!copy) + copy = 
gst_buffer_new_and_alloc (GST_BUFFER_SIZE (self)); + + memcpy (GST_BUFFER_DATA (copy), + GST_BUFFER_DATA (self), GST_BUFFER_SIZE (self)); + + gst_buffer_copy_metadata (GST_BUFFER (copy), + GST_BUFFER (self), GST_BUFFER_COPY_ALL); + + return copy; + +} + +static void +gst_ducati_buffer_finalize (GstDucatiBuffer * self) +{ + PVR2DERROR pvr_error; + GstPvrBufferPool *pool = self->pool; + gboolean resuscitated = FALSE; + + GST_LOG_OBJECT (pool->element, "finalizing buffer %p", self); + + GST_PVR_BUFFERPOOL_LOCK (pool); + g_queue_remove (pool->used_buffers, self); + if (pool->running) { + resuscitated = TRUE; + + g_queue_push_head (pool->free_buffers, self); + } else { + GST_LOG_OBJECT (pool->element, "the pool is shutting down"); + } + GST_PVR_BUFFERPOOL_UNLOCK (pool); + + if (resuscitated) { + GST_LOG_OBJECT (pool->element, "reviving buffer %p", self); + gst_buffer_ref (GST_BUFFER (self)); + GST_BUFFER_SIZE (self) = 0; + } + + if (!resuscitated) { + GST_LOG_OBJECT (pool->element, + "buffer %p (data %p, len %u) not recovered, freeing", + self, GST_BUFFER_DATA (self), GST_BUFFER_SIZE (self)); + + if (self->wrapped) { + pvr_error = PVR2DMemFree (pool->pvr_context, self->src_mem); + if (pvr_error != PVR2D_OK) { + GST_ERROR_OBJECT (pool->element, "Failed to Unwrap buffer memory" + "returned %d", pvr_error); + } + self->wrapped = FALSE; + } + MemMgr_Free ((void *) GST_BUFFER_DATA (self)); + GST_BUFFER_DATA (self) = NULL; + gst_mini_object_unref (GST_MINI_OBJECT (pool)); + GST_MINI_OBJECT_CLASS (buffer_parent_class)->finalize (GST_MINI_OBJECT + (self)); + } +} + +static void +gst_ducati_buffer_class_init (gpointer g_class, gpointer class_data) +{ + GstMiniObjectClass *mini_object_class = GST_MINI_OBJECT_CLASS (g_class); + + buffer_parent_class = g_type_class_peek_parent (g_class); + + mini_object_class->copy = (GstMiniObjectCopyFunction) + GST_DEBUG_FUNCPTR (gst_ducati_buffer_copy); + mini_object_class->finalize = (GstMiniObjectFinalizeFunction) + GST_DEBUG_FUNCPTR 
(gst_ducati_buffer_finalize); +} + +GType +gst_ducati_buffer_get_type (void) +{ + static GType type; + + if (G_UNLIKELY (type == 0)) { + static const GTypeInfo info = { + .class_size = sizeof (GstBufferClass), + .class_init = gst_ducati_buffer_class_init, + .instance_size = sizeof (GstDucatiBuffer), + }; + type = g_type_register_static (GST_TYPE_BUFFER, + "GstDucatiBufferPvrsink", &info, 0); + } + return type; +} + +/* + * GstDucatiBufferPool + */ + +static GstMiniObjectClass *bufferpool_parent_class = NULL; + +/** create new bufferpool + * @element : the element that owns this pool + * @caps: the caps to set on the buffer + * @num_buffers: the requested number of buffers in the pool + */ +GstPvrBufferPool * +gst_pvr_bufferpool_new (GstElement * element, GstCaps * caps, gint num_buffers, + gint size, PVR2DCONTEXTHANDLE pvr_context) +{ + GstPvrBufferPool *self = (GstPvrBufferPool *) + gst_mini_object_new (GST_TYPE_PVRBUFFERPOOL); + GstStructure *s; + + g_return_val_if_fail (caps != NULL, NULL); + s = gst_caps_get_structure (caps, 0); + + self->element = gst_object_ref (element); + gst_structure_get_int (s, "width", &self->padded_width); + gst_structure_get_int (s, "height", &self->padded_height); + if (strcmp (gst_structure_get_name (s), "video/x-raw-yuv-strided")) { + self->strided = FALSE; + } else { + self->strided = TRUE; + gst_structure_get_int (s, "rowstride", &self->rowstride); + } + self->caps = gst_caps_ref (caps); + self->size = size; + self->pvr_context = pvr_context; + + self->free_buffers = g_queue_new (); + self->used_buffers = g_queue_new (); + self->lock = g_mutex_new (); + self->running = TRUE; + + return self; +} + +static void +unwrap_buffer (gpointer buffer, gpointer user_data) +{ + PVR2DERROR pvr_error; + GstDucatiBuffer *buf = GST_DUCATIBUFFER (buffer); + GstPvrBufferPool *pool = (GstPvrBufferPool *) user_data; + + if (buf->wrapped) { + pvr_error = PVR2DMemFree (pool->pvr_context, buf->src_mem); + if (pvr_error != PVR2D_OK) { + GST_ERROR_OBJECT 
(pool->element, "Failed to Unwrap buffer memory" + "returned %d", pvr_error); + } + buf->wrapped = FALSE; + } +} + +void +gst_pvr_bufferpool_stop_running (GstPvrBufferPool * self, gboolean unwrap) +{ + gboolean empty = FALSE; + + g_return_if_fail (self); + + GST_PVR_BUFFERPOOL_LOCK (self); + self->running = FALSE; + GST_PVR_BUFFERPOOL_UNLOCK (self); + + GST_DEBUG_OBJECT (self->element, "free available buffers"); + + /* free all buffers on the freelist */ + while (!empty) { + GstDucatiBuffer *buf; + GST_PVR_BUFFERPOOL_LOCK (self); + buf = g_queue_pop_head (self->free_buffers); + GST_PVR_BUFFERPOOL_UNLOCK (self); + if (buf) + gst_buffer_unref (GST_BUFFER (buf)); + else + empty = TRUE; + } + + if (unwrap) + g_queue_foreach (self->used_buffers, unwrap_buffer, self); + + gst_mini_object_unref (GST_MINI_OBJECT (self)); +} + +/** get buffer from bufferpool, allocate new buffer if needed */ +GstDucatiBuffer * +gst_pvr_bufferpool_get (GstPvrBufferPool * self) +{ + GstDucatiBuffer *buf = NULL; + + g_return_val_if_fail (self, NULL); + + GST_PVR_BUFFERPOOL_LOCK (self); + if (self->running) { + /* re-use a buffer off the freelist if any are available + */ + buf = g_queue_pop_head (self->free_buffers); + if (!buf) + buf = gst_ducati_buffer_new (self); + g_queue_push_head (self->used_buffers, buf); + + GST_BUFFER_SIZE (buf) = self->size; + } + GST_PVR_BUFFERPOOL_UNLOCK (self); + + return buf; +} + +static void +gst_pvr_bufferpool_finalize (GstPvrBufferPool * self) +{ + GST_DEBUG_OBJECT (self->element, "destroy bufferpool"); + g_mutex_free (self->lock); + self->lock = NULL; + + g_queue_free (self->free_buffers); + self->free_buffers = NULL; + g_queue_free (self->used_buffers); + self->used_buffers = NULL; + + gst_caps_unref (self->caps); + self->caps = NULL; + gst_object_unref (self->element); + self->element = NULL; + + GST_MINI_OBJECT_CLASS (bufferpool_parent_class)->finalize (GST_MINI_OBJECT + (self)); +} + +static void +gst_pvr_bufferpool_class_init (gpointer g_class, gpointer 
class_data) +{ + GstMiniObjectClass *mini_object_class = GST_MINI_OBJECT_CLASS (g_class); + + bufferpool_parent_class = g_type_class_peek_parent (g_class); + + mini_object_class->finalize = (GstMiniObjectFinalizeFunction) + GST_DEBUG_FUNCPTR (gst_pvr_bufferpool_finalize); +} + +GType +gst_pvr_bufferpool_get_type (void) +{ + static GType type; + + if (G_UNLIKELY (type == 0)) { + static const GTypeInfo info = { + .class_size = sizeof (GstMiniObjectClass), + .class_init = gst_pvr_bufferpool_class_init, + .instance_size = sizeof (GstPvrBufferPool), + }; + type = g_type_register_static (GST_TYPE_MINI_OBJECT, + "GstPvrBufferPool", &info, 0); + } + return type; +} diff --git a/sys/pvr2d/gstpvrbufferpool.h b/sys/pvr2d/gstpvrbufferpool.h new file mode 100644 index 0000000..8c0f024 --- /dev/null +++ b/sys/pvr2d/gstpvrbufferpool.h @@ -0,0 +1,88 @@ +/* + * GStreamer + * Copyright (c) 2010, 2011 Texas Instruments Incorporated + * Copyright (c) 2011, Collabora Ltda + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __GSTPVRBUFFERPOOL_H__ +#define __GSTPVRBUFFERPOOL_H__ + +#include "gstpvr.h" +#include <pvr2d.h> + +G_BEGIN_DECLS + +GType gst_ducati_buffer_get_type (void); +#define GST_TYPE_DUCATIBUFFER (gst_ducati_buffer_get_type()) +#define GST_IS_DUCATIBUFFER(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_DUCATIBUFFER)) +#define GST_DUCATIBUFFER(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_DUCATIBUFFER, GstDucatiBuffer)) + +GType gst_pvr_bufferpool_get_type (void); +#define GST_TYPE_PVRBUFFERPOOL (gst_pvr_bufferpool_get_type()) +#define GST_IS_PVRBUFFERPOOL(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_PVRBUFFERPOOL)) +#define GST_PVRBUFFERPOOL(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_PVRBUFFERPOOL, \ + GstPvrBufferPool)) + +typedef struct _GstPvrBufferPool GstPvrBufferPool; +typedef struct _GstDucatiBuffer GstDucatiBuffer; + +struct _GstPvrBufferPool +{ + GstMiniObject parent; + + /* output (padded) size including any codec padding: */ + gint padded_width, padded_height; + gint size; + gboolean strided; + gint rowstride; + PVR2DCONTEXTHANDLE pvr_context; + + GstCaps *caps; + GMutex *lock; + gboolean running; /* with lock */ + GstElement *element; /* the element that owns us.. 
*/ + GQueue *free_buffers; + GQueue *used_buffers; + guint buffer_count; +}; + +GstPvrBufferPool * gst_pvr_bufferpool_new (GstElement * element, + GstCaps * caps, gint num_buffers, gint size, + PVR2DCONTEXTHANDLE pvr_context); +void gst_pvr_bufferpool_stop_running (GstPvrBufferPool * pool, gboolean unwrap); +GstDucatiBuffer * gst_pvr_bufferpool_get (GstPvrBufferPool * self); + +#define GST_PVR_BUFFERPOOL_LOCK(self) g_mutex_lock ((self)->lock) +#define GST_PVR_BUFFERPOOL_UNLOCK(self) g_mutex_unlock ((self)->lock) + +struct _GstDucatiBuffer { + GstBuffer parent; + + GstPvrBufferPool *pool; /* buffer-pool that this buffer belongs to */ + PVR2DMEMINFO *src_mem; /* Memory wrapped by pvr */ + gboolean wrapped; +}; + +PVR2DMEMINFO * gst_ducati_buffer_get_meminfo (GstDucatiBuffer * self); + +G_END_DECLS + +#endif /* __GSTPVRBUFFERPOOL_H__ */ diff --git a/sys/pvr2d/gstpvrvideosink.c b/sys/pvr2d/gstpvrvideosink.c new file mode 100644 index 0000000..9f739a5 --- /dev/null +++ b/sys/pvr2d/gstpvrvideosink.c @@ -0,0 +1,1974 @@ +/* GStreamer + * + * Copyright (C) 2011 Collabora Ltda + * Copyright (C) 2011 Texas Instruments + * @author: Luciana Fujii Pontello <luciana.fujii@collabora.co.uk> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. 
+ */ + +/* Object header */ +#include "gstpvrvideosink.h" + +#include "gstpvrbufferpool.h" +#include <gst/video/gstvideosink.h> +#include <gst/interfaces/xoverlay.h> +#include <gst/interfaces/navigation.h> + +/* Debugging category */ +#include <gst/gstinfo.h> + +#define LINUX +#include <dri2_ws.h> +#include <services.h> +#include <img_defs.h> +#include <servicesext.h> + +#define DEFAULT_QUEUE_SIZE 12 +#define DEFAULT_MIN_QUEUED_BUFS 1 + +GST_DEBUG_CATEGORY_EXTERN (gst_debug_pvrvideosink); +#define GST_CAT_DEFAULT gst_debug_pvrvideosink + +#define PVR2DMEMINFO_INITIALISE(d, s) \ +{ \ + (d)->hPrivateData = (IMG_VOID *)(s); \ + (d)->hPrivateMapData = (IMG_VOID *)(s->hKernelMemInfo); \ + (d)->ui32DevAddr = (IMG_UINT32) (s)->sDevVAddr.uiAddr; \ + (d)->ui32MemSize = (s)->uAllocSize; \ + (d)->pBase = (s)->pvLinAddr;\ + (d)->ulFlags = (s)->ui32Flags;\ +} + +/* end of internal definitions */ + +static void gst_pvrvideosink_reset (GstPVRVideoSink * pvrvideosink); +static GstFlowReturn gst_pvrvideosink_buffer_alloc (GstBaseSink * bsink, + guint64 offset, guint size, GstCaps * caps, GstBuffer ** buf); +static void gst_pvrvideosink_xwindow_draw_borders (GstPVRVideoSink * + pvrvideosink, GstXWindow * xwindow, GstVideoRectangle rect); +static void gst_pvrvideosink_expose (GstXOverlay * overlay); +static void gst_pvrvideosink_xwindow_destroy (GstPVRVideoSink * pvrvideosink, + GstXWindow * xwindow); +static void gst_pvrvideosink_set_event_handling (GstXOverlay * overlay, + gboolean handle_events); + +static GstStaticPadTemplate gst_pvrvideosink_sink_template_factory = + GST_STATIC_PAD_TEMPLATE ("sink", + GST_PAD_SINK, + GST_PAD_ALWAYS, + GST_STATIC_CAPS ("video/x-raw-yuv, " + "format = (fourcc) NV12, " + "width = " GST_VIDEO_SIZE_RANGE ", " + "height = " GST_VIDEO_SIZE_RANGE ", " + "framerate = " GST_VIDEO_FPS_RANGE ";" + "video/x-raw-yuv-strided, " + "format = (fourcc) NV12, " + "rowstride = (int) 4096, " + "width = " GST_VIDEO_SIZE_RANGE ", " + "height = " GST_VIDEO_SIZE_RANGE ", 
" + "framerate = " GST_VIDEO_FPS_RANGE)); + +enum +{ + PROP_0, + PROP_FORCE_ASPECT_RATIO, + PROP_WINDOW_WIDTH, + PROP_WINDOW_HEIGHT +}; + +static GstVideoSinkClass *parent_class = NULL; + +/* ============================================================= */ +/* */ +/* Private Methods */ +/* */ +/* ============================================================= */ + +/* pvrvideo buffers */ + +#define GST_TYPE_PVRVIDEO_BUFFER (gst_pvrvideo_buffer_get_type()) + +#define GST_IS_PVRVIDEO_BUFFER(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_PVRVIDEO_BUFFER)) +#define GST_PVRVIDEO_BUFFER(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_PVRVIDEO_BUFFER, GstPVRVideoBuffer)) +#define GST_PVRVIDEO_BUFFER_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), GST_TYPE_PVRVIDEO_BUFFER, GstPVRVideoBufferClass)) + + +static const char * +pvr2dstrerr (PVR2DERROR err) +{ + switch (err) { + case PVR2D_OK: + return "Ok"; + case PVR2DERROR_DEVICE_UNAVAILABLE: + return "Failed to blit, device unavailable"; + case PVR2DERROR_INVALID_CONTEXT: + return "Failed to blit, invalid context"; + case PVR2DERROR_INVALID_PARAMETER: + return "Failed to blit, invalid parameter"; + case PVR2DERROR_HW_FEATURE_NOT_SUPPORTED: + return "Failed to blit, hardware feature not supported"; + case PVR2DERROR_GENERIC_ERROR: + return "Failed to blit, generic error"; + default: + return "Unknown error"; + } +} + +static const char * +wseglstrerr (WSEGLError err) +{ + switch (err) { + case WSEGL_SUCCESS: + return "Ok"; + case WSEGL_CANNOT_INITIALISE: + return "Cannot initialize"; + case WSEGL_BAD_NATIVE_DISPLAY: + return "Bad native display"; + case WSEGL_BAD_NATIVE_WINDOW: + return "Bad native window"; + case WSEGL_BAD_NATIVE_PIXMAP: + return "Bad native pixmap"; + case WSEGL_BAD_NATIVE_ENGINE: + return "Bad native engine"; + case WSEGL_BAD_DRAWABLE: + return "Bad drawable"; + case WSEGL_BAD_MATCH: + return "Bad match"; + case WSEGL_OUT_OF_MEMORY: + return "Out of memory"; + default: + return "Unknown error"; + } +} 
+ +/* This function calculates the pixel aspect ratio based on the properties + * * in the xcontext structure and stores it there. */ +static void +gst_pvrvideosink_calculate_pixel_aspect_ratio (GstDrawContext * dcontext) +{ + static const gint par[][2] = { + {1, 1}, /* regular screen */ + {16, 15}, /* PAL TV */ + {11, 10}, /* 525 line Rec.601 video */ + {54, 59}, /* 625 line Rec.601 video */ + {64, 45}, /* 1280x1024 on 16:9 display */ + {5, 3}, /* 1280x1024 on 4:3 display */ + {4, 3} /* 800x600 on 16:9 display */ + }; + gint i; + gint index; + gdouble ratio; + gdouble delta; + +#define DELTA(idx) (ABS (ratio - ((gdouble) par[idx][0] / par[idx][1]))) + + /* first calculate the "real" ratio; which is the "physical" w/h divided + * by the w/h in pixels of the display */ + ratio = (gdouble) (dcontext->physical_width * dcontext->display_height) + / (dcontext->physical_height * dcontext->display_width); + + GST_DEBUG ("calculated pixel aspect ratio: %f", ratio); + /* now find the one from par[][2] with the lowest delta to the real one */ + delta = DELTA (0); + index = 0; + + for (i = 1; i < sizeof (par) / (sizeof (gint) * 2); ++i) { + gdouble this_delta = DELTA (i); + + if (this_delta < delta) { + index = i; + delta = this_delta; + } + } + + GST_DEBUG ("Decided on index %d (%d/%d)", index, + par[index][0], par[index][1]); + + g_free (dcontext->par); + dcontext->par = g_new0 (GValue, 1); + g_value_init (dcontext->par, GST_TYPE_FRACTION); + gst_value_set_fraction (dcontext->par, par[index][0], par[index][1]); + GST_DEBUG ("set dcontext PAR to %d/%d", + gst_value_get_fraction_numerator (dcontext->par), + gst_value_get_fraction_denominator (dcontext->par)); +} + +static void +pvr_recreate_drawable (GstPVRVideoSink * pvrvideosink) +{ + WSEGLError glerror; + GstDrawContext *dcontext = pvrvideosink->dcontext; + + if (dcontext->drawable_handle) { + glerror = + dcontext->wsegl_table-> + pfnWSEGL_DeleteDrawable (dcontext->drawable_handle); + if (glerror) { + GST_ELEMENT_ERROR 
(pvrvideosink, RESOURCE, FAILED, + ("error deleting drawable"), ("%s", wseglstrerr (glerror))); + return; + } + } + + glerror = + dcontext->wsegl_table-> + pfnWSEGL_CreateWindowDrawable (dcontext->display_handle, + dcontext->glconfig, &dcontext->drawable_handle, + (NativeWindowType) pvrvideosink->xwindow->window, &dcontext->rotation); + if (glerror) { + GST_ELEMENT_ERROR (pvrvideosink, RESOURCE, FAILED, + ("error creating drawable"), ("%s", wseglstrerr (glerror))); + } +} + +static void +pvr_get_drawable_params (GstPVRVideoSink * pvrvideosink) +{ + WSEGLError glerror; + WSEGLDrawableParams source_params; + PVRSRV_CLIENT_MEM_INFO *client_mem_info; + GstDrawContext *dcontext = pvrvideosink->dcontext; + + glerror = + dcontext->wsegl_table-> + pfnWSEGL_GetDrawableParameters (dcontext->drawable_handle, &source_params, + &pvrvideosink->render_params); + + if (glerror == WSEGL_BAD_DRAWABLE) { + /* this can happen if window size changes, window is redirected/ + * unredirected, etc.. when this happens, recreate drawable and + * try again. 
+ */ + GST_DEBUG_OBJECT (pvrvideosink, "drawable changed, recreating"); + pvr_recreate_drawable (pvrvideosink); + pvr_get_drawable_params (pvrvideosink); + return; + } + + if (glerror) { + GST_ERROR_OBJECT (pvrvideosink, "%s", wseglstrerr (glerror)); + } + + client_mem_info = + (PVRSRV_CLIENT_MEM_INFO *) pvrvideosink->render_params.hPrivateData; + + PVR2DMEMINFO_INITIALISE (&dcontext->dst_mem, client_mem_info); +} + +static void +pvr_swap_buffers (GstPVRVideoSink * pvrvideosink) +{ + GstDrawContext *dcontext = pvrvideosink->dcontext; + + dcontext->wsegl_table->pfnWSEGL_SwapDrawable (dcontext->drawable_handle, 1); + + pvr_get_drawable_params (pvrvideosink); +} + +/* wait for previous blits to dst surface to complete */ +static void +pvr_query_blits_complete (GstPVRVideoSink * pvrvideosink) +{ + GstDrawContext *dcontext = pvrvideosink->dcontext; + PVR2DERROR pvr_error; + + pvr_error = PVR2DQueryBlitsComplete (dcontext->pvr_context, + &dcontext->dst_mem, TRUE); + + if (pvr_error) { + GST_ERROR_OBJECT (pvrvideosink, "%s (%d)", + pvr2dstrerr (pvr_error), pvr_error); + } +} + +static void +gst_pvrvideosink_xwindow_update_geometry (GstPVRVideoSink * pvrvideosink) +{ + XWindowAttributes attr; + + /* Update the window geometry */ + g_mutex_lock (pvrvideosink->dcontext->x_lock); + if (G_UNLIKELY (pvrvideosink->xwindow == NULL)) { + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + return; + } + pvrvideosink->redraw_borders = 2; + + XGetWindowAttributes (pvrvideosink->dcontext->x_display, + pvrvideosink->xwindow->window, &attr); + + pvrvideosink->xwindow->width = attr.width; + pvrvideosink->xwindow->height = attr.height; + + if (!pvrvideosink->have_render_rect) { + pvrvideosink->render_rect.x = pvrvideosink->render_rect.y = 0; + pvrvideosink->render_rect.w = attr.width; + pvrvideosink->render_rect.h = attr.height; + } + + if (pvrvideosink->dcontext != NULL) { + pvr_recreate_drawable (pvrvideosink); + pvr_get_drawable_params (pvrvideosink); + } + + g_mutex_unlock 
(pvrvideosink->dcontext->x_lock); +} + +/* This function handles XEvents that might be in the queue. It generates + GstEvent that will be sent upstream in the pipeline to handle interactivity + and navigation. It will also listen for configure events on the window to + trigger caps renegotiation so on the fly software scaling can work. */ +static void +gst_pvrvideosink_handle_xevents (GstPVRVideoSink * pvrvideosink) +{ + Display *dpy = pvrvideosink->dcontext->x_display; + Window win = pvrvideosink->xwindow->window; + XEvent e; + gboolean exposed = FALSE; + gboolean configured = FALSE; + guint pointer_x = 0, pointer_y = 0; + gboolean pointer_moved = FALSE; + + g_mutex_lock (pvrvideosink->flow_lock); + g_mutex_lock (pvrvideosink->dcontext->x_lock); + + /* First get all pointer motion events, only the last position is + * interesting so throw out the earlier ones: + */ + while (XCheckWindowEvent (dpy, win, PointerMotionMask, &e)) { + switch (e.type) { + case MotionNotify: + pointer_x = e.xmotion.x; + pointer_y = e.xmotion.y; + pointer_moved = TRUE; + break; + default: + break; + } + } + + if (pointer_moved) { + GST_DEBUG_OBJECT (pvrvideosink, + "pointer moved over window at %d,%d", pointer_x, pointer_y); + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + gst_navigation_send_mouse_event (GST_NAVIGATION (pvrvideosink), + "mouse-move", 0, e.xbutton.x, e.xbutton.y); + g_mutex_lock (pvrvideosink->dcontext->x_lock); + } + + /* Then handle all the other events: */ + while (XCheckWindowEvent (pvrvideosink->dcontext->x_display, + pvrvideosink->xwindow->window, + ExposureMask | StructureNotifyMask | + KeyPressMask | KeyReleaseMask | + ButtonPressMask | ButtonReleaseMask, &e)) { + KeySym keysym; + const char *key_str = NULL; + + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + + switch (e.type) { + case Expose: + exposed = TRUE; + break; + case ConfigureNotify: + gst_pvrvideosink_xwindow_update_geometry (pvrvideosink); + configured = TRUE; + break; + case ButtonPress: + 
GST_DEBUG_OBJECT (pvrvideosink, + "button %d pressed over window at %d,%d", + e.xbutton.button, e.xbutton.x, e.xbutton.y); + gst_navigation_send_mouse_event (GST_NAVIGATION (pvrvideosink), + "mouse-button-press", e.xbutton.button, e.xbutton.x, e.xbutton.y); + break; + case ButtonRelease: + GST_DEBUG_OBJECT (pvrvideosink, + "button %d released over window at %d,%d", e.xbutton.button, + e.xbutton.x, e.xbutton.y); + gst_navigation_send_mouse_event (GST_NAVIGATION (pvrvideosink), + "mouse-button-release", e.xbutton.button, e.xbutton.x, e.xbutton.y); + break; + case KeyPress: + case KeyRelease: + g_mutex_lock (pvrvideosink->dcontext->x_lock); + keysym = XKeycodeToKeysym (dpy, e.xkey.keycode, 0); + if (keysym != NoSymbol) { + key_str = XKeysymToString (keysym); + } else { + key_str = "unknown"; + } + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + GST_DEBUG_OBJECT (pvrvideosink, + "key %d pressed over window at %d,%d (%s)", + e.xkey.keycode, e.xkey.x, e.xkey.y, key_str); + gst_navigation_send_key_event (GST_NAVIGATION (pvrvideosink), + e.type == KeyPress ? 
"key-press" : "key-release", key_str); + break; + default: + GST_DEBUG_OBJECT (pvrvideosink, "unhandled X event (%d)", e.type); + break; + } + + g_mutex_lock (pvrvideosink->dcontext->x_lock); + } + + if (exposed || configured) { + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + g_mutex_unlock (pvrvideosink->flow_lock); + + gst_pvrvideosink_expose (GST_X_OVERLAY (pvrvideosink)); + + g_mutex_lock (pvrvideosink->flow_lock); + g_mutex_lock (pvrvideosink->dcontext->x_lock); + } + + /* Handle Display events */ + while (XPending (pvrvideosink->dcontext->x_display)) { + XNextEvent (pvrvideosink->dcontext->x_display, &e); + + switch (e.type) { + case ClientMessage:{ + Atom wm_delete; + + wm_delete = XInternAtom (pvrvideosink->dcontext->x_display, + "WM_DELETE_WINDOW", True); + if (wm_delete != None && wm_delete == (Atom) e.xclient.data.l[0]) { + /* Handle window deletion by posting an error on the bus */ + GST_ELEMENT_ERROR (pvrvideosink, RESOURCE, NOT_FOUND, + ("Output window was closed"), (NULL)); + + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + gst_pvrvideosink_xwindow_destroy (pvrvideosink, + pvrvideosink->xwindow); + pvrvideosink->xwindow = NULL; + g_mutex_lock (pvrvideosink->dcontext->x_lock); + } + break; + } + default: + break; + } + } + + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + g_mutex_unlock (pvrvideosink->flow_lock); +} + +static gpointer +gst_pvrvideosink_event_thread (GstPVRVideoSink * pvrvideosink) +{ + GST_OBJECT_LOCK (pvrvideosink); + while (pvrvideosink->running) { + GST_OBJECT_UNLOCK (pvrvideosink); + + if (pvrvideosink->xwindow) { + gst_pvrvideosink_handle_xevents (pvrvideosink); + } + g_usleep (G_USEC_PER_SEC / 20); + + GST_OBJECT_LOCK (pvrvideosink); + } + GST_OBJECT_UNLOCK (pvrvideosink); + + return NULL; +} + +static void +gst_pvrvideosink_manage_event_thread (GstPVRVideoSink * pvrvideosink) +{ + GThread *thread = NULL; + + /* don't start the thread too early */ + if (pvrvideosink->dcontext == NULL) { + return; + } + + 
GST_OBJECT_LOCK (pvrvideosink); + if (!pvrvideosink->event_thread) { + /* Setup our event listening thread */ + GST_DEBUG_OBJECT (pvrvideosink, "run xevent thread"); + pvrvideosink->running = TRUE; + pvrvideosink->event_thread = g_thread_create ( + (GThreadFunc) gst_pvrvideosink_event_thread, pvrvideosink, TRUE, NULL); + } + GST_OBJECT_UNLOCK (pvrvideosink); + + /* Wait for our event thread to finish */ + if (thread) + g_thread_join (thread); +} + + +static GstDrawContext * +gst_pvrvideosink_get_dcontext (GstPVRVideoSink * pvrvideosink) +{ + GstDrawContext *dcontext; + PVR2DERROR pvr_error; + gint refresh_rate; + DRI2WSDisplay *displayImpl; + WSEGLError glerror; + const WSEGLCaps *glcaps; + PVR2DMISCDISPLAYINFO misc_display_info; + + dcontext = g_new0 (GstDrawContext, 1); + dcontext->x_lock = g_mutex_new (); + + dcontext->x_display = XOpenDisplay (NULL); + + dcontext->wsegl_table = WSEGL_GetFunctionTablePointer (); + glerror = dcontext->wsegl_table->pfnWSEGL_IsDisplayValid ( + (NativeDisplayType) dcontext->x_display); + + if (glerror) { + GST_ELEMENT_ERROR (pvrvideosink, RESOURCE, WRITE, + ("Display is not valid"), ("%s", wseglstrerr (glerror))); + return NULL; + } + + glerror = dcontext->wsegl_table->pfnWSEGL_InitialiseDisplay ( + (NativeDisplayType) dcontext->x_display, &dcontext->display_handle, + &glcaps, &dcontext->glconfig); + if (glerror) { + GST_ELEMENT_ERROR (pvrvideosink, RESOURCE, WRITE, + ("Failed to initialize display"), ("%s", wseglstrerr (glerror))); + return NULL; + } + + displayImpl = (DRI2WSDisplay *) dcontext->display_handle; + dcontext->pvr_context = displayImpl->hContext; + + pvr_error = PVR2DGetScreenMode (dcontext->pvr_context, + &dcontext->display_format, &dcontext->display_width, + &dcontext->display_height, &dcontext->stride, &refresh_rate); + if (pvr_error != PVR2D_OK) { + GST_ELEMENT_ERROR (pvrvideosink, RESOURCE, READ, + ("Failed to get screen mode"), ("returned %d", pvr_error)); + return NULL; + } + pvr_error = PVR2DGetMiscDisplayInfo 
(dcontext->pvr_context, + &misc_display_info); + if (pvr_error != PVR2D_OK) { + GST_ELEMENT_ERROR (pvrvideosink, RESOURCE, READ, + ("Failed to get display info"), ("returned %d", pvr_error)); + return NULL; + } + dcontext->physical_width = misc_display_info.ulPhysicalWidthmm; + dcontext->physical_height = misc_display_info.ulPhysicalHeightmm; + dcontext->screen_num = DefaultScreen (dcontext->x_display); + dcontext->black = XBlackPixel (dcontext->x_display, dcontext->screen_num); + gst_pvrvideosink_calculate_pixel_aspect_ratio (dcontext); + + return dcontext; +} + +static void +gst_pvrvideosink_xwindow_set_title (GstPVRVideoSink * pvrvideosink, + GstXWindow * xwindow, const gchar * media_title) +{ + if (media_title) { + g_free (pvrvideosink->media_title); + pvrvideosink->media_title = g_strdup (media_title); + } + if (xwindow) { + /* we have a window */ + if (xwindow->internal) { + XTextProperty xproperty; + const gchar *app_name; + const gchar *title = NULL; + gchar *title_mem = NULL; + + /* set application name as a title */ + app_name = g_get_application_name (); + + if (app_name && pvrvideosink->media_title) { + title = title_mem = g_strconcat (pvrvideosink->media_title, " : ", + app_name, NULL); + } else if (app_name) { + title = app_name; + } else if (pvrvideosink->media_title) { + title = pvrvideosink->media_title; + } + + if (title) { + if ((XStringListToTextProperty (((char **) &title), 1, + &xproperty)) != 0) { + XSetWMName (pvrvideosink->dcontext->x_display, xwindow->window, + &xproperty); + XFree (xproperty.value); + } + + g_free (title_mem); + } + } + } +} + +static GstXWindow * +gst_pvrvideosink_create_window (GstPVRVideoSink * pvrvideosink, gint width, + gint height) +{ + Window root; + GstXWindow *xwindow; + GstDrawContext *dcontext; + XGCValues values; + Atom wm_delete; + + GST_DEBUG_OBJECT (pvrvideosink, "begin"); + + dcontext = pvrvideosink->dcontext; + xwindow = g_new0 (GstXWindow, 1); + + xwindow->internal = TRUE; + + g_mutex_lock 
(dcontext->x_lock); + + root = DefaultRootWindow (dcontext->x_display); + xwindow->window = XCreateSimpleWindow (dcontext->x_display, root, 0, 0, + width, height, 2, 2, dcontext->black); + + /* Tell the window manager we'd like delete client messages instead of + * being killed */ + wm_delete = XInternAtom (dcontext->x_display, "WM_DELETE_WINDOW", True); + if (wm_delete != None) { + (void) XSetWMProtocols (dcontext->x_display, xwindow->window, + &wm_delete, 1); + } + + XMapWindow (dcontext->x_display, xwindow->window); + + /* We have to do that to prevent X from redrawing the background on + * ConfigureNotify. This takes away flickering of video when resizing. */ + XSetWindowBackgroundPixmap (dcontext->x_display, xwindow->window, None); + + gst_pvrvideosink_xwindow_set_title (pvrvideosink, xwindow, NULL); + + xwindow->gc = XCreateGC (dcontext->x_display, xwindow->window, 0, &values); + + g_mutex_unlock (dcontext->x_lock); + + gst_pvrvideosink_xwindow_update_geometry (pvrvideosink); + + GST_DEBUG_OBJECT (pvrvideosink, "end"); + return xwindow; +} + +static void +gst_pvrvideosink_blit (GstPVRVideoSink * pvrvideosink, GstBuffer * buffer) +{ + PVR2DERROR pvr_error = PVR2D_OK; + GstDrawContext *dcontext = pvrvideosink->dcontext; + gint video_width; + gint video_height; + gboolean draw_border = FALSE; + PVR2D_3DBLT_EXT s_blt_3d = { }; + PPVR2D_3DBLT_EXT p_blt_3d; + PVR2DMEMINFO *src_mem; + PVR2DFORMAT pvr_format = pvrvideosink->format == GST_VIDEO_FORMAT_NV12 ? 
+ PVR2D_YUV420_2PLANE : PVR2D_ARGB8888; + PVR2DRECT *crop = &pvrvideosink->crop; + GstVideoRectangle result; + + GST_DEBUG_OBJECT (pvrvideosink, "begin"); + + g_mutex_lock (pvrvideosink->flow_lock); + if (buffer == NULL) + buffer = pvrvideosink->current_buffer; + + if (buffer == NULL) { + g_mutex_unlock (pvrvideosink->flow_lock); + return; + } + + video_width = pvrvideosink->video_width; + video_height = pvrvideosink->video_height; + + src_mem = gst_ducati_buffer_get_meminfo ((GstDucatiBuffer *) buffer); + p_blt_3d = &s_blt_3d; + + g_mutex_lock (dcontext->x_lock); + + /* Draw borders when displaying the first frame. After this + draw borders only on expose event or after a size change. */ + if (!(pvrvideosink->current_buffer) || pvrvideosink->redraw_borders) { + draw_border = TRUE; + } + + if (!pvrvideosink->xwindow) { + goto done; + } + + /* Sometimes the application hasn't really given us valid dimensions + * when we want to render the first frame, which throws pvr into a + * tizzy, so let's just detect it and bail early: + */ + if ((pvrvideosink->xwindow->width <= 1) || + (pvrvideosink->xwindow->height <= 1)) { + GST_DEBUG_OBJECT (pvrvideosink, "skipping render due to invalid " + "window dimensions: %dx%d", pvrvideosink->xwindow->width, + pvrvideosink->xwindow->height); + goto done; + } + + /* Store a reference to the last image we put, lose the previous one */ + if (buffer && pvrvideosink->current_buffer != buffer) { + if (pvrvideosink->current_buffer) { + GST_LOG_OBJECT (pvrvideosink, "unreffing %p", + pvrvideosink->current_buffer); + gst_buffer_unref (GST_BUFFER_CAST (pvrvideosink->current_buffer)); + } + GST_LOG_OBJECT (pvrvideosink, "reffing %p as our current buffer", buffer); + pvrvideosink->current_buffer = gst_buffer_ref (buffer); + } + + if (pvrvideosink->keep_aspect) { + GstVideoRectangle src, dst; + + src.w = GST_VIDEO_SINK_WIDTH (pvrvideosink); + src.h = GST_VIDEO_SINK_HEIGHT (pvrvideosink); + dst.w = pvrvideosink->render_rect.w; + dst.h = 
pvrvideosink->render_rect.h; + gst_video_sink_center_rect (src, dst, &result, TRUE); + result.x += pvrvideosink->render_rect.x; + result.y += pvrvideosink->render_rect.y; + } else { + memcpy (&result, &pvrvideosink->render_rect, sizeof (GstVideoRectangle)); + } + + p_blt_3d->sDst.pSurfMemInfo = &dcontext->dst_mem; + p_blt_3d->sDst.SurfOffset = 0; + p_blt_3d->sDst.Stride = + gst_video_format_get_row_stride (GST_VIDEO_FORMAT_BGRx, 0, + pvrvideosink->render_params.ui32Stride); + p_blt_3d->sDst.Format = PVR2D_ARGB8888; + p_blt_3d->sDst.SurfWidth = pvrvideosink->xwindow->width; + p_blt_3d->sDst.SurfHeight = pvrvideosink->xwindow->height; + + p_blt_3d->rcDest.left = result.x; + p_blt_3d->rcDest.top = result.y; + p_blt_3d->rcDest.right = result.w + result.x; + p_blt_3d->rcDest.bottom = result.h + result.y; + + p_blt_3d->sSrc.pSurfMemInfo = src_mem; + p_blt_3d->sSrc.SurfOffset = 0; + p_blt_3d->sSrc.Stride = pvrvideosink->rowstride; + p_blt_3d->sSrc.Format = pvr_format; + p_blt_3d->sSrc.SurfWidth = video_width; + p_blt_3d->sSrc.SurfHeight = video_height; + + if (crop->left || crop->top || crop->right || crop->bottom) { + p_blt_3d->rcSource = *crop; + } else { + p_blt_3d->rcSource.left = 0; + p_blt_3d->rcSource.top = 0; + p_blt_3d->rcSource.right = video_width; + p_blt_3d->rcSource.bottom = video_height; + } + + GST_DEBUG_OBJECT (pvrvideosink, "blit: %dx%d (%d) -> %dx%d (%d)", + p_blt_3d->sSrc.SurfWidth, p_blt_3d->sSrc.SurfHeight, + p_blt_3d->sSrc.Stride, p_blt_3d->sDst.SurfWidth, + p_blt_3d->sDst.SurfHeight, p_blt_3d->sDst.Stride); + GST_DEBUG_OBJECT (pvrvideosink, "crop: %d,%d %d,%d -> %d,%d %d,%d", + p_blt_3d->rcSource.left, p_blt_3d->rcSource.top, + p_blt_3d->rcSource.right, p_blt_3d->rcSource.bottom, + p_blt_3d->rcDest.left, p_blt_3d->rcDest.top, + p_blt_3d->rcDest.right, p_blt_3d->rcDest.bottom); + + if (pvrvideosink->format == GST_VIDEO_FORMAT_NV12) + p_blt_3d->bDisableDestInput = TRUE; + else + /* blit fails for RGB without this... not sure why yet... 
*/ + /* I'd guess because using ARGB format (ie. has alpha channel) */ + p_blt_3d->bDisableDestInput = FALSE; + + pvr_query_blits_complete (pvrvideosink); + + if (pvrvideosink->interlaced) { + /* NOTE: this probably won't look so good if linear (instead + * of point) filtering is used. + */ + p_blt_3d->bFilter = FALSE; + + /* for interlaced blits, we split up the image into two blits.. + * we expect even field on top, odd field on bottom. We blit + * from first top half, then bottom half, doubling up the + * stride of the destination buffer. + */ + /* step 1: */ + p_blt_3d->rcSource.bottom /= 2; + p_blt_3d->rcDest.bottom /= 2; + p_blt_3d->sDst.Stride *= 2; + + pvr_error = PVR2DBlt3DExt (dcontext->pvr_context, p_blt_3d); + if (pvr_error) + goto done; + + /* step 2: */ + p_blt_3d->rcSource.top += video_height / 2; + p_blt_3d->rcSource.bottom += video_height / 2; + p_blt_3d->sDst.SurfOffset = p_blt_3d->sDst.Stride / 2; + + pvr_error = PVR2DBlt3DExt (dcontext->pvr_context, p_blt_3d); + + } else { + p_blt_3d->bFilter = TRUE; + pvr_error = PVR2DBlt3DExt (dcontext->pvr_context, p_blt_3d); + } + + if (pvr_error) + goto done; + + if (draw_border) { + gst_pvrvideosink_xwindow_draw_borders (pvrvideosink, pvrvideosink->xwindow, + result); + pvrvideosink->redraw_borders--; + if (pvrvideosink->redraw_borders < 0) + pvrvideosink->redraw_borders = 0; + } else { + pvr_swap_buffers (pvrvideosink); + } + +done: + if (pvr_error) { + GST_ERROR_OBJECT (pvrvideosink, "%s (%d)", + pvr2dstrerr (pvr_error), pvr_error); + } + GST_DEBUG_OBJECT (pvrvideosink, "end"); + g_mutex_unlock (dcontext->x_lock); + g_mutex_unlock (pvrvideosink->flow_lock); +} + +static void +gst_pvrvideosink_destroy_drawable (GstPVRVideoSink * pvrvideosink) +{ + if (pvrvideosink->dcontext != NULL) { + if (pvrvideosink->dcontext->drawable_handle) + pvrvideosink->dcontext-> + wsegl_table->pfnWSEGL_DeleteDrawable (pvrvideosink->dcontext-> + drawable_handle); + + pvrvideosink->dcontext->wsegl_table->pfnWSEGL_CloseDisplay 
(pvrvideosink-> + dcontext->display_handle); + } +} + +/* We are called with the x_lock taken */ +static void +gst_pvrvideosink_pvrfill_rectangle (GstPVRVideoSink * pvrvideosink, + GstVideoRectangle rect) +{ + PVR2DERROR pvr_error; + PVR2DBLTINFO s_blt2d_info = { 0 }; + PPVR2DBLTINFO p_blt2d_info; + GstDrawContext *dcontext = pvrvideosink->dcontext; + + GST_DEBUG_OBJECT (pvrvideosink, "begin"); + + p_blt2d_info = &s_blt2d_info; + + p_blt2d_info->pDstMemInfo = &dcontext->dst_mem; + p_blt2d_info->BlitFlags = PVR2D_BLIT_DISABLE_ALL; + p_blt2d_info->DstOffset = 0; + p_blt2d_info->CopyCode = PVR2DROPclear; + p_blt2d_info->DstStride = + gst_video_format_get_row_stride (GST_VIDEO_FORMAT_BGRx, 0, + pvrvideosink->render_params.ui32Stride); + p_blt2d_info->DstFormat = PVR2D_ARGB8888; + p_blt2d_info->DstSurfWidth = pvrvideosink->xwindow->width; + p_blt2d_info->DstSurfHeight = pvrvideosink->xwindow->height; + p_blt2d_info->DstX = rect.x; + p_blt2d_info->DstY = rect.y; + p_blt2d_info->DSizeX = rect.w; + p_blt2d_info->DSizeY = rect.h; + + pvr_error = PVR2DBlt (dcontext->pvr_context, p_blt2d_info); + if (pvr_error) { + GST_ERROR_OBJECT (pvrvideosink, "%s (%d)", + pvr2dstrerr (pvr_error), pvr_error); + } + GST_DEBUG_OBJECT (pvrvideosink, "end"); +} + +/* We are called with the x_lock taken */ +static void +gst_pvrvideosink_xwindow_draw_borders (GstPVRVideoSink * pvrvideosink, + GstXWindow * xwindow, GstVideoRectangle rect) +{ + gint t1, t2; + GstVideoRectangle result; + + g_return_if_fail (GST_IS_PVRVIDEOSINK (pvrvideosink)); + g_return_if_fail (xwindow != NULL); + + /* Left border */ + result.x = pvrvideosink->render_rect.x; + result.y = pvrvideosink->render_rect.y; + result.w = rect.x - pvrvideosink->render_rect.x; + result.h = pvrvideosink->render_rect.h; + if (rect.x > pvrvideosink->render_rect.x) + gst_pvrvideosink_pvrfill_rectangle (pvrvideosink, result); + + /* Right border */ + t1 = rect.x + rect.w; + t2 = pvrvideosink->render_rect.x + pvrvideosink->render_rect.w; + 
result.x = t1; + result.y = pvrvideosink->render_rect.y; + result.w = t2 - t1; + result.h = pvrvideosink->render_rect.h; + if (t1 < t2) + gst_pvrvideosink_pvrfill_rectangle (pvrvideosink, result); + + /* Top border */ + result.x = pvrvideosink->render_rect.x; + result.y = pvrvideosink->render_rect.y; + result.w = pvrvideosink->render_rect.w; + result.h = rect.y - pvrvideosink->render_rect.y; + if (rect.y > pvrvideosink->render_rect.y) + gst_pvrvideosink_pvrfill_rectangle (pvrvideosink, result); + + /* Bottom border */ + t1 = rect.y + rect.h; + t2 = pvrvideosink->render_rect.y + pvrvideosink->render_rect.h; + result.x = pvrvideosink->render_rect.x; + result.y = t1; + result.w = pvrvideosink->render_rect.w; + result.h = t2 - t1; + if (t1 < t2) + gst_pvrvideosink_pvrfill_rectangle (pvrvideosink, result); + + pvr_swap_buffers (pvrvideosink); +} + +/* Element stuff */ + +static gboolean +gst_pvrvideosink_configure_overlay (GstPVRVideoSink * pvrvideosink, gint width, + gint height, gint video_par_n, gint video_par_d, gint display_par_n, + gint display_par_d) +{ + guint calculated_par_n; + guint calculated_par_d; + + if (!gst_video_calculate_display_ratio (&calculated_par_n, &calculated_par_d, + width, height, video_par_n, video_par_d, display_par_n, + display_par_d)) { + GST_ELEMENT_ERROR (pvrvideosink, CORE, NEGOTIATION, (NULL), + ("Error calculating the output display ratio of the video.")); + return FALSE; + } + + GST_DEBUG_OBJECT (pvrvideosink, + "video width/height: %dx%d, calculated display ratio: %d/%d", + width, height, calculated_par_n, calculated_par_d); + + /* now find a width x height that respects this display ratio. 
+ * prefer those that have one of w/h the same as the incoming video + * using wd / hd = calculated_pad_n / calculated_par_d */ + + /* start with same height, because of interlaced video */ + /* check hd / calculated_par_d is an integer scale factor, and scale wd with the PAR */ + if (height % calculated_par_d == 0) { + GST_DEBUG_OBJECT (pvrvideosink, "keeping video height"); + GST_VIDEO_SINK_WIDTH (pvrvideosink) = (guint) + gst_util_uint64_scale_int (height, calculated_par_n, calculated_par_d); + GST_VIDEO_SINK_HEIGHT (pvrvideosink) = height; + } else if (width % calculated_par_n == 0) { + GST_DEBUG_OBJECT (pvrvideosink, "keeping video width"); + GST_VIDEO_SINK_WIDTH (pvrvideosink) = width; + GST_VIDEO_SINK_HEIGHT (pvrvideosink) = (guint) + gst_util_uint64_scale_int (width, calculated_par_d, calculated_par_n); + } else { + GST_DEBUG_OBJECT (pvrvideosink, "approximating while keeping video height"); + GST_VIDEO_SINK_WIDTH (pvrvideosink) = (guint) + gst_util_uint64_scale_int (height, calculated_par_n, calculated_par_d); + GST_VIDEO_SINK_HEIGHT (pvrvideosink) = height; + } + GST_DEBUG_OBJECT (pvrvideosink, "scaling to %dx%d", + GST_VIDEO_SINK_WIDTH (pvrvideosink), + GST_VIDEO_SINK_HEIGHT (pvrvideosink)); + + return TRUE; +} + +static gboolean +gst_pvrvideosink_setcaps (GstBaseSink * bsink, GstCaps * caps) +{ + GstPVRVideoSink *pvrvideosink; + gboolean ret = TRUE; + GstStructure *structure; + gint width, height; + const GValue *fps; + const GValue *caps_par; + GstQuery *query; + + pvrvideosink = GST_PVRVIDEOSINK (bsink); + + GST_DEBUG_OBJECT (pvrvideosink, + "sinkconnect possible caps with given caps %", caps); + + if (pvrvideosink->current_caps) { + GST_DEBUG_OBJECT (pvrvideosink, "already have caps set"); + if (gst_caps_is_equal (pvrvideosink->current_caps, caps)) { + GST_DEBUG_OBJECT (pvrvideosink, "caps are equal!"); + return TRUE; + } + GST_DEBUG_OBJECT (pvrvideosink, "caps are different"); + } + + structure = gst_caps_get_structure (caps, 0); + + ret = 
gst_video_format_parse_caps_strided (caps, &pvrvideosink->format, + &width, &height, &pvrvideosink->rowstride); + if (pvrvideosink->rowstride == 0) + pvrvideosink->rowstride = + gst_video_format_get_row_stride (pvrvideosink->format, 0, width); + fps = gst_structure_get_value (structure, "framerate"); + ret &= (fps != NULL); + if (!ret) { + GST_ERROR_OBJECT (pvrvideosink, "problem at parsing caps"); + return FALSE; + } + + pvrvideosink->video_width = width; + pvrvideosink->video_height = height; + + /* figure out if we are dealing w/ interlaced */ + pvrvideosink->interlaced = FALSE; + gst_structure_get_boolean (structure, "interlaced", + &pvrvideosink->interlaced); + + /* get video's pixel-aspect-ratio */ + caps_par = gst_structure_get_value (structure, "pixel-aspect-ratio"); + if (caps_par) { + pvrvideosink->video_par_n = gst_value_get_fraction_numerator (caps_par); + pvrvideosink->video_par_d = gst_value_get_fraction_denominator (caps_par); + } else { + pvrvideosink->video_par_n = 1; + pvrvideosink->video_par_d = 1; + } + + /* get display's pixel-aspect-ratio */ + if (pvrvideosink->display_par) { + pvrvideosink->display_par_n = + gst_value_get_fraction_numerator (pvrvideosink->display_par); + pvrvideosink->display_par_d = + gst_value_get_fraction_denominator (pvrvideosink->display_par); + } else { + pvrvideosink->display_par_n = 1; + pvrvideosink->display_par_d = 1; + } + + if (!gst_pvrvideosink_configure_overlay (pvrvideosink, width, height, + pvrvideosink->video_par_n, pvrvideosink->video_par_d, + pvrvideosink->display_par_n, pvrvideosink->display_par_d)) + return FALSE; + + g_mutex_lock (pvrvideosink->pool_lock); + if (pvrvideosink->buffer_pool) { + if (!gst_caps_is_equal (pvrvideosink->buffer_pool->caps, caps)) { + GST_INFO_OBJECT (pvrvideosink, "in set caps, pool->caps != caps"); + gst_pvr_bufferpool_stop_running (pvrvideosink->buffer_pool, FALSE); + pvrvideosink->buffer_pool = NULL; + } + } + g_mutex_unlock (pvrvideosink->pool_lock); + + /* query to find if 
anyone upstream using these buffers has any + * minimum requirements: + */ + query = gst_query_new_buffers (caps); + if (gst_element_query (GST_ELEMENT (pvrvideosink), query)) { + gint min_buffers; + + gst_query_parse_buffers_count (query, &min_buffers); + + GST_DEBUG_OBJECT (pvrvideosink, "min_buffers=%d", min_buffers); + + /* XXX need to account for some buffers used by queue, etc.. probably + * queue should handle query, pass on to sink pad, and then add some + * number of buffers to the min, so this value is dynamic depending + * on the pipeline? + */ + if (min_buffers != -1) { + min_buffers += 3 + pvrvideosink->min_queued_bufs; + pvrvideosink->num_buffers_can_change = FALSE; + } + + if (min_buffers > pvrvideosink->num_buffers) { + pvrvideosink->num_buffers = min_buffers; + } + } + gst_query_unref (query); + + /* Notify application to set xwindow id now */ + g_mutex_lock (pvrvideosink->flow_lock); + if (!pvrvideosink->xwindow) { + g_mutex_unlock (pvrvideosink->flow_lock); + gst_x_overlay_prepare_xwindow_id (GST_X_OVERLAY (pvrvideosink)); + } else { + g_mutex_unlock (pvrvideosink->flow_lock); + } + + g_mutex_lock (pvrvideosink->flow_lock); + if (!pvrvideosink->xwindow) + pvrvideosink->xwindow = gst_pvrvideosink_create_window (pvrvideosink, + GST_VIDEO_SINK_WIDTH (pvrvideosink), + GST_VIDEO_SINK_HEIGHT (pvrvideosink)); + + g_mutex_unlock (pvrvideosink->flow_lock); + + gst_pvrvideosink_set_event_handling (GST_X_OVERLAY (pvrvideosink), TRUE); + + pvrvideosink->fps_n = gst_value_get_fraction_numerator (fps); + pvrvideosink->fps_d = gst_value_get_fraction_denominator (fps); + + pvrvideosink->current_caps = gst_caps_ref (caps); + + return TRUE; +} + +static GstCaps * +gst_pvrvideosink_getcaps (GstBaseSink * bsink) +{ + GstPVRVideoSink *pvrvideosink; + GstCaps *caps; + + pvrvideosink = GST_PVRVIDEOSINK (bsink); + + caps = gst_caps_copy (gst_pad_get_pad_template_caps (GST_BASE_SINK + (pvrvideosink)->sinkpad)); + return caps; +} + +static GstStateChangeReturn 
+gst_pvrvideosink_change_state (GstElement * element, GstStateChange transition) +{ + GstPVRVideoSink *pvrvideosink; + GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; + GstDrawContext *dcontext; + + pvrvideosink = GST_PVRVIDEOSINK (element); + + switch (transition) { + case GST_STATE_CHANGE_NULL_TO_READY: + if (pvrvideosink->dcontext == NULL) { + dcontext = gst_pvrvideosink_get_dcontext (pvrvideosink); + if (dcontext == NULL) + return GST_STATE_CHANGE_FAILURE; + GST_OBJECT_LOCK (pvrvideosink); + pvrvideosink->dcontext = dcontext; + GST_OBJECT_UNLOCK (pvrvideosink); + } + + /* update object's pixel-aspect-ratio with calculated one */ + if (!pvrvideosink->display_par) { + pvrvideosink->display_par = g_new0 (GValue, 1); + gst_value_init_and_copy (pvrvideosink->display_par, + pvrvideosink->dcontext->par); + GST_DEBUG_OBJECT (pvrvideosink, "set calculated PAR on object's PAR"); + } + + gst_pvrvideosink_manage_event_thread (pvrvideosink); + break; + case GST_STATE_CHANGE_READY_TO_PAUSED: + g_mutex_lock (pvrvideosink->pool_lock); + pvrvideosink->pool_invalid = FALSE; + g_mutex_unlock (pvrvideosink->pool_lock); + break; + case GST_STATE_CHANGE_PAUSED_TO_READY: + g_mutex_lock (pvrvideosink->pool_lock); + pvrvideosink->pool_invalid = TRUE; + g_mutex_unlock (pvrvideosink->pool_lock); + break; + case GST_STATE_CHANGE_PAUSED_TO_PLAYING: + break; + default: + break; + } + + ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); + + switch (transition) { + case GST_STATE_CHANGE_PLAYING_TO_PAUSED: + break; + case GST_STATE_CHANGE_PAUSED_TO_READY: + pvrvideosink->fps_n = 0; + pvrvideosink->fps_d = 1; + GST_VIDEO_SINK_WIDTH (pvrvideosink) = 0; + GST_VIDEO_SINK_HEIGHT (pvrvideosink) = 0; + break; + case GST_STATE_CHANGE_READY_TO_NULL: + gst_pvrvideosink_reset (pvrvideosink); + break; + default: + break; + } + + return ret; +} + +static void +gst_pvrvideosink_get_times (GstBaseSink * bsink, GstBuffer * buf, + GstClockTime * start, GstClockTime * end) +{ + 
GstPVRVideoSink *pvrvideosink; + + pvrvideosink = GST_PVRVIDEOSINK (bsink); + + if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) { + *start = GST_BUFFER_TIMESTAMP (buf); + if (GST_BUFFER_DURATION_IS_VALID (buf)) { + *end = *start + GST_BUFFER_DURATION (buf); + } else { + if (pvrvideosink->fps_n > 0) { + *end = *start + + gst_util_uint64_scale_int (GST_SECOND, pvrvideosink->fps_d, + pvrvideosink->fps_n); + } + } + } +} + +static gboolean +gst_pvrvideosink_event (GstBaseSink * bsink, GstEvent * event) +{ + gboolean res; + GstPVRVideoSink *pvrvideosink = GST_PVRVIDEOSINK (bsink); + GstStructure *structure; + GstMessage *message; + + switch (GST_EVENT_TYPE (event)) { + case GST_EVENT_CROP: + { + gint left, top, width, height; + + PVR2DRECT *c = &pvrvideosink->crop; + gst_event_parse_crop (event, &top, &left, &width, &height); + c->top = top; + c->left = left; + if (width == -1) { + c->right = GST_VIDEO_SINK_WIDTH (pvrvideosink); + width = GST_VIDEO_SINK_WIDTH (pvrvideosink); + } else { + c->right = left + width; + } + + if (height >= 0) { + if (pvrvideosink->current_caps) { + if (pvrvideosink->interlaced) + height *= 2; + } + c->bottom = top + height; + } else { + c->bottom = GST_VIDEO_SINK_HEIGHT (pvrvideosink); + height = GST_VIDEO_SINK_HEIGHT (pvrvideosink); + } + + structure = gst_structure_new ("video-size-crop", "width", G_TYPE_INT, + width, "height", G_TYPE_INT, height, NULL); + message = gst_message_new_application (GST_OBJECT (pvrvideosink), + structure); + gst_bus_post (gst_element_get_bus (GST_ELEMENT (pvrvideosink)), message); + + + if (!gst_pvrvideosink_configure_overlay (pvrvideosink, width, height, + pvrvideosink->video_par_n, pvrvideosink->video_par_d, + pvrvideosink->display_par_n, pvrvideosink->display_par_d)) + return FALSE; + break; + } + default: + res = TRUE; + } + + + return res; +} + +static GstFlowReturn +gst_pvrvideosink_show_frame (GstBaseSink * vsink, GstBuffer * buf) +{ + GstPVRVideoSink *pvrvideosink; + GstBuffer *newbuf = NULL; + 
g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR); + + pvrvideosink = GST_PVRVIDEOSINK (vsink); + + GST_DEBUG_OBJECT (pvrvideosink, "render buffer: %p", buf); + + if (!GST_IS_DUCATIBUFFER (buf)) { + GstFlowReturn ret; + + /* special case check for sub-buffers: In certain cases, places like + * GstBaseTransform, which might check that the buffer is writable + * before copying metadata, timestamp, and such, will find that the + * buffer has more than one reference to it. In these cases, they + * will create a sub-buffer with an offset=0 and length equal to the + * original buffer size. + * + * This could happen in two scenarios: (1) a tee in the pipeline, and + * (2) because the refcnt is incremented in gst_mini_object_free() + * before the finalize function is called, and decremented after it + * returns.. but returning this buffer to the buffer pool in the + * finalize function, could wake up a thread blocked in _buffer_alloc() + * which could run and get a buffer w/ refcnt==2 before the thread + * originally unref'ing the buffer returns from finalize function and + * decrements the refcnt back to 1! + */ + if (buf->parent && + (GST_BUFFER_DATA (buf) == GST_BUFFER_DATA (buf->parent)) && + (GST_BUFFER_SIZE (buf) == GST_BUFFER_SIZE (buf->parent))) { + GST_DEBUG_OBJECT (pvrvideosink, "I have a sub-buffer!"); + return gst_pvrvideosink_show_frame (vsink, buf->parent); + } + + GST_DEBUG_OBJECT (pvrvideosink, + "slow-path.. 
I got a %s so I need to memcpy", + g_type_name (G_OBJECT_TYPE (buf))); + + ret = gst_pvrvideosink_buffer_alloc (GST_BASE_SINK (vsink), + GST_BUFFER_OFFSET (buf), GST_BUFFER_SIZE (buf), GST_BUFFER_CAPS (buf), + &newbuf); + + if (GST_FLOW_OK != ret) { + GST_DEBUG_OBJECT (pvrvideosink, "dropping frame!!"); + return GST_FLOW_OK; + } + + memcpy (GST_BUFFER_DATA (newbuf), + GST_BUFFER_DATA (buf), + MIN (GST_BUFFER_SIZE (newbuf), GST_BUFFER_SIZE (buf))); + + GST_DEBUG_OBJECT (pvrvideosink, "render copied buffer: %p", newbuf); + + buf = newbuf; + } + + gst_pvrvideosink_blit (pvrvideosink, buf); + + if (newbuf) { + gst_buffer_unref (newbuf); + } + + return GST_FLOW_OK; +} + + +/* Buffer management + * + * The buffer_alloc function must either return a buffer with given size and + * caps or create a buffer with different caps attached to the buffer. This + * last option is called reverse negotiation, ie, where the sink suggests a + * different format from the upstream peer. + * + * We try to do reverse negotiation when our geometry changes and we like a + * resized buffer. 
+ */ +static GstFlowReturn +gst_pvrvideosink_buffer_alloc (GstBaseSink * bsink, guint64 offset, guint size, + GstCaps * caps, GstBuffer ** buf) +{ + GstPVRVideoSink *pvrvideosink; + GstDucatiBuffer *pvrvideo = NULL; + GstFlowReturn ret = GST_FLOW_OK; + + pvrvideosink = GST_PVRVIDEOSINK (bsink); + + GST_DEBUG_OBJECT (pvrvideosink, "begin"); + + if (G_UNLIKELY (!caps)) { + GST_WARNING_OBJECT (pvrvideosink, + "have no caps, doing fallback allocation"); + *buf = NULL; + ret = GST_FLOW_OK; + goto beach; + } + + g_mutex_lock (pvrvideosink->pool_lock); + if (G_UNLIKELY (pvrvideosink->pool_invalid)) { + GST_DEBUG_OBJECT (pvrvideosink, "the pool is flushing"); + ret = GST_FLOW_WRONG_STATE; + g_mutex_unlock (pvrvideosink->pool_lock); + goto beach; + } + + GST_LOG_OBJECT (pvrvideosink, + "a buffer of %d bytes was requested with caps %" GST_PTR_FORMAT + " and offset %" G_GUINT64_FORMAT, size, caps, offset); + + /* initialize the buffer pool if not initialized yet */ + if (G_UNLIKELY (!pvrvideosink->buffer_pool || + pvrvideosink->buffer_pool->size != size)) { + if (pvrvideosink->buffer_pool) { + GST_INFO_OBJECT (pvrvideosink, "in buffer alloc, pool->size != size"); + gst_pvr_bufferpool_stop_running (pvrvideosink->buffer_pool, FALSE); + } + + GST_LOG_OBJECT (pvrvideosink, "Creating a buffer pool with %d buffers", + pvrvideosink->num_buffers); + if (!(pvrvideosink->buffer_pool = + gst_pvr_bufferpool_new (GST_ELEMENT (pvrvideosink), + caps, 8, size, pvrvideosink->dcontext->pvr_context))) { + g_mutex_unlock (pvrvideosink->pool_lock); + return GST_FLOW_ERROR; + } + } + pvrvideo = gst_pvr_bufferpool_get (pvrvideosink->buffer_pool); + g_mutex_unlock (pvrvideosink->pool_lock); + + *buf = GST_BUFFER_CAST (pvrvideo); + +beach: + return ret; +} + +/* Interfaces stuff */ + +static gboolean +gst_pvrvideosink_interface_supported (GstImplementsInterface * iface, + GType type) +{ + if (type == GST_TYPE_X_OVERLAY || type == GST_TYPE_NAVIGATION) + return TRUE; + else + return FALSE; +} + +static 
void +gst_pvrvideosink_interface_init (GstImplementsInterfaceClass * klass) +{ + klass->supported = gst_pvrvideosink_interface_supported; +} + +/* This function destroys a GstXWindow */ +static void +gst_pvrvideosink_xwindow_destroy (GstPVRVideoSink * pvrvideosink, + GstXWindow * xwindow) +{ + g_return_if_fail (xwindow != NULL); + + g_mutex_lock (pvrvideosink->dcontext->x_lock); + + /* If we did not create that window we just free the GC and let it live */ + if (xwindow->internal) + XDestroyWindow (pvrvideosink->dcontext->x_display, xwindow->window); + else + XSelectInput (pvrvideosink->dcontext->x_display, xwindow->window, 0); + + XFreeGC (pvrvideosink->dcontext->x_display, xwindow->gc); + + XSync (pvrvideosink->dcontext->x_display, FALSE); + + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + + g_free (xwindow); +} + +/* + * GstXOverlay Interface: + */ + +static void +gst_pvrvideosink_set_window_handle (GstXOverlay * overlay, guintptr id) +{ + XID xwindow_id = id; + GstPVRVideoSink *pvrvideosink = GST_PVRVIDEOSINK (overlay); + GstXWindow *xwindow = NULL; + + g_return_if_fail (GST_IS_PVRVIDEOSINK (pvrvideosink)); + + g_mutex_lock (pvrvideosink->flow_lock); + + /* If we already use that window return */ + if (pvrvideosink->xwindow && (xwindow_id == pvrvideosink->xwindow->window)) { + g_mutex_unlock (pvrvideosink->flow_lock); + return; + } + + /* If the element has not initialized the X11 context try to do so */ + if (!pvrvideosink->dcontext && !(pvrvideosink->dcontext = + gst_pvrvideosink_get_dcontext (pvrvideosink))) { + g_mutex_unlock (pvrvideosink->flow_lock); + /* we have thrown a GST_ELEMENT_ERROR now */ + return; + } + + /* Clear image pool as the images are unusable anyway */ + g_mutex_lock (pvrvideosink->pool_lock); + if (pvrvideosink->buffer_pool) { + gst_pvr_bufferpool_stop_running (pvrvideosink->buffer_pool, FALSE); + pvrvideosink->buffer_pool = NULL; + } + g_mutex_unlock (pvrvideosink->pool_lock); + + /* If a window is there already we destroy it */ + 
if (pvrvideosink->xwindow) { + gst_pvrvideosink_xwindow_destroy (pvrvideosink, pvrvideosink->xwindow); + pvrvideosink->xwindow = NULL; + } + + /* If the xid is 0 we will create an internal one in buffer_alloc */ + if (xwindow_id != 0) { + XWindowAttributes attr; + + xwindow = g_new0 (GstXWindow, 1); + xwindow->window = xwindow_id; + + /* Set the event we want to receive and create a GC */ + g_mutex_lock (pvrvideosink->dcontext->x_lock); + + XGetWindowAttributes (pvrvideosink->dcontext->x_display, xwindow->window, + &attr); + + xwindow->width = attr.width; + xwindow->height = attr.height; + xwindow->internal = FALSE; + if (!pvrvideosink->have_render_rect) { + pvrvideosink->render_rect.x = pvrvideosink->render_rect.y = 0; + pvrvideosink->render_rect.w = attr.width; + pvrvideosink->render_rect.h = attr.height; + } + + XSetWindowBackgroundPixmap (pvrvideosink->dcontext->x_display, + xwindow->window, None); + + XMapWindow (pvrvideosink->dcontext->x_display, xwindow->window); + xwindow->gc = XCreateGC (pvrvideosink->dcontext->x_display, + xwindow->window, 0, NULL); + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + + pvrvideosink->xwindow = xwindow; + pvr_recreate_drawable (pvrvideosink); + pvr_get_drawable_params (pvrvideosink); + } + + g_mutex_unlock (pvrvideosink->flow_lock); + + gst_pvrvideosink_set_event_handling (overlay, TRUE); +} + +static void +gst_pvrvideosink_expose (GstXOverlay * overlay) +{ + GstPVRVideoSink *pvrvideosink = GST_PVRVIDEOSINK (overlay); + + gst_pvrvideosink_blit (pvrvideosink, NULL); +} + +static void +gst_pvrvideosink_set_event_handling (GstXOverlay * overlay, + gboolean handle_events) +{ + GstPVRVideoSink *pvrvideosink = GST_PVRVIDEOSINK (overlay); + long event_mask; + + g_mutex_lock (pvrvideosink->flow_lock); + + if (G_UNLIKELY (!pvrvideosink->xwindow)) { + g_mutex_unlock (pvrvideosink->flow_lock); + return; + } + + g_mutex_lock (pvrvideosink->dcontext->x_lock); + + event_mask = ExposureMask | StructureNotifyMask | + PointerMotionMask | 
KeyPressMask | KeyReleaseMask; + + if (pvrvideosink->xwindow->internal) { + event_mask |= ButtonPressMask | ButtonReleaseMask; + } + + XSelectInput (pvrvideosink->dcontext->x_display, + pvrvideosink->xwindow->window, event_mask); + + g_mutex_unlock (pvrvideosink->dcontext->x_lock); + + g_mutex_unlock (pvrvideosink->flow_lock); +} + +static void +gst_pvrvideosink_set_render_rectangle (GstXOverlay * overlay, gint x, gint y, + gint width, gint height) +{ + GstPVRVideoSink *pvrvideosink = GST_PVRVIDEOSINK (overlay); + + /* FIXME: how about some locking? */ + if (width >= 0 && height >= 0) { + pvrvideosink->render_rect.x = x; + pvrvideosink->render_rect.y = y; + pvrvideosink->render_rect.w = width; + pvrvideosink->render_rect.h = height; + pvrvideosink->have_render_rect = TRUE; + } else { + pvrvideosink->render_rect.x = 0; + pvrvideosink->render_rect.y = 0; + pvrvideosink->render_rect.w = pvrvideosink->xwindow->width; + pvrvideosink->render_rect.h = pvrvideosink->xwindow->height; + pvrvideosink->have_render_rect = FALSE; + } + GST_DEBUG_OBJECT (pvrvideosink, "render_rect is %dX%d", + pvrvideosink->render_rect.w, pvrvideosink->render_rect.h); +} + +static void +gst_pvrvideosink_xoverlay_init (GstXOverlayClass * iface) +{ + iface->set_window_handle = gst_pvrvideosink_set_window_handle; + iface->expose = gst_pvrvideosink_expose; + iface->handle_events = gst_pvrvideosink_set_event_handling; + iface->set_render_rectangle = gst_pvrvideosink_set_render_rectangle; +} + +/* + * GstNavigation Interface: + */ + +static void +gst_pvrvideosink_send_event (GstNavigation * navigation, + GstStructure * structure) +{ + GstPVRVideoSink *pvrvideosink = GST_PVRVIDEOSINK (navigation); + GstPad *peer; + + if ((peer = gst_pad_get_peer (GST_VIDEO_SINK_PAD (pvrvideosink)))) { + GstVideoRectangle result; + gdouble x, y, xscale = 1.0, yscale = 1.0; + + if (pvrvideosink->keep_aspect) { + GstVideoRectangle src = { + .w = GST_VIDEO_SINK_WIDTH (pvrvideosink), + .h = GST_VIDEO_SINK_HEIGHT 
(pvrvideosink), + }; + GstVideoRectangle dst = { + .w = pvrvideosink->render_rect.w, + .h = pvrvideosink->render_rect.h, + }; + + gst_video_sink_center_rect (src, dst, &result, TRUE); + result.x += pvrvideosink->render_rect.x; + result.y += pvrvideosink->render_rect.y; + } else { + result = pvrvideosink->render_rect; + } + + /* We calculate scaling using the original video frames geometry to + * include pixel aspect ratio scaling. + */ + xscale = (gdouble) pvrvideosink->video_width / result.w; + yscale = (gdouble) pvrvideosink->video_height / result.h; + + /* Note: this doesn't account for crop top/left offsets.. which + * is probably not quite right.. OTOH, I don't think the ducati + * decoder elements subtract back out the crop offsets as the + * event propagates upstream, so as long as the one receiving + * the event is upstream of the decoder, the net effect will be + * correct.. although this might be worth fixing correctly at + * some point. + */ + + /* Converting pointer coordinates to the non scaled geometry */ + if (gst_structure_get_double (structure, "pointer_x", &x)) { + x = MIN (x, result.x + result.w); + x = MAX (x - result.x, 0); + gst_structure_set (structure, "pointer_x", G_TYPE_DOUBLE, + (gdouble) x * xscale, NULL); + } + if (gst_structure_get_double (structure, "pointer_y", &y)) { + y = MIN (y, result.y + result.h); + y = MAX (y - result.y, 0); + gst_structure_set (structure, "pointer_y", G_TYPE_DOUBLE, + (gdouble) y * yscale, NULL); + } + + gst_pad_send_event (peer, gst_event_new_navigation (structure)); + gst_object_unref (peer); + } +} + +static void +gst_pvrvideosink_navigation_init (GstNavigationInterface * iface) +{ + iface->send_event = gst_pvrvideosink_send_event; +} + +/* =========================================== */ +/* */ +/* Init & Class init */ +/* */ +/* =========================================== */ + +static void +gst_pvrvideosink_set_property (GObject * object, guint prop_id, + const GValue * value, GParamSpec * pspec) +{ + 
GstPVRVideoSink *pvrvideosink; + + g_return_if_fail (GST_IS_PVRVIDEOSINK (object)); + + pvrvideosink = GST_PVRVIDEOSINK (object); + + switch (prop_id) { + case PROP_FORCE_ASPECT_RATIO: + pvrvideosink->keep_aspect = g_value_get_boolean (value); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); + break; + } +} + +static void +gst_pvrvideosink_get_property (GObject * object, guint prop_id, + GValue * value, GParamSpec * pspec) +{ + GstPVRVideoSink *pvrvideosink; + + g_return_if_fail (GST_IS_PVRVIDEOSINK (object)); + + pvrvideosink = GST_PVRVIDEOSINK (object); + + switch (prop_id) { + case PROP_FORCE_ASPECT_RATIO: + g_value_set_boolean (value, pvrvideosink->keep_aspect); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); + break; + } +} + +static void +gst_pvrvideosink_dcontext_clear (GstPVRVideoSink * pvrvideosink) +{ + GstDrawContext *dcontext; + + GST_OBJECT_LOCK (pvrvideosink); + if (!pvrvideosink->dcontext) { + GST_OBJECT_UNLOCK (pvrvideosink); + return; + } + + dcontext = pvrvideosink->dcontext; + pvrvideosink->dcontext = NULL; + GST_OBJECT_UNLOCK (pvrvideosink); + + g_free (dcontext->par); + + g_mutex_lock (dcontext->x_lock); + XCloseDisplay (dcontext->x_display); + g_mutex_unlock (dcontext->x_lock); + g_mutex_free (dcontext->x_lock); + + g_free (dcontext); +} + +static void +gst_pvrvideosink_reset (GstPVRVideoSink * pvrvideosink) +{ + GThread *thread; + + GST_OBJECT_LOCK (pvrvideosink); + pvrvideosink->running = FALSE; + thread = pvrvideosink->event_thread; + pvrvideosink->event_thread = NULL; + GST_OBJECT_UNLOCK (pvrvideosink); + + if (thread) + g_thread_join (thread); + + if (pvrvideosink->current_buffer) { + gst_buffer_unref (pvrvideosink->current_buffer); + pvrvideosink->current_buffer = NULL; + } + + g_mutex_lock (pvrvideosink->pool_lock); + pvrvideosink->pool_invalid = TRUE; + if (pvrvideosink->buffer_pool) { + gst_pvr_bufferpool_stop_running (pvrvideosink->buffer_pool, TRUE); + 
pvrvideosink->buffer_pool = NULL; + } + g_mutex_unlock (pvrvideosink->pool_lock); + memset (&pvrvideosink->crop, 0, sizeof (PVR2DRECT)); + memset (&pvrvideosink->render_params, 0, sizeof (WSEGLDrawableParams)); + + pvrvideosink->render_rect.x = pvrvideosink->render_rect.y = 0; + pvrvideosink->render_rect.w = pvrvideosink->render_rect.h = 0; + pvrvideosink->have_render_rect = FALSE; + + gst_pvrvideosink_destroy_drawable (pvrvideosink); + + if (pvrvideosink->xwindow) { + gst_pvrvideosink_xwindow_destroy (pvrvideosink, pvrvideosink->xwindow); + pvrvideosink->xwindow = NULL; + } + + g_free (pvrvideosink->display_par); + pvrvideosink->display_par = NULL; + gst_pvrvideosink_dcontext_clear (pvrvideosink); +} + +static void +gst_pvrvideosink_finalize (GObject * object) +{ + GstPVRVideoSink *pvrvideosink; + + pvrvideosink = GST_PVRVIDEOSINK (object); + + gst_pvrvideosink_reset (pvrvideosink); + + if (pvrvideosink->flow_lock) { + g_mutex_free (pvrvideosink->flow_lock); + pvrvideosink->flow_lock = NULL; + } + if (pvrvideosink->pool_lock) { + g_mutex_free (pvrvideosink->pool_lock); + pvrvideosink->pool_lock = NULL; + } + + G_OBJECT_CLASS (parent_class)->finalize (object); +} + +static void +gst_pvrvideosink_init (GstPVRVideoSink * pvrvideosink) +{ + pvrvideosink->running = FALSE; + + pvrvideosink->fps_n = 0; + pvrvideosink->fps_d = 1; + pvrvideosink->video_width = 0; + pvrvideosink->video_height = 0; + + pvrvideosink->flow_lock = g_mutex_new (); + pvrvideosink->pool_lock = g_mutex_new (); + pvrvideosink->buffer_pool = NULL; + pvrvideosink->pool_invalid = TRUE; + + pvrvideosink->keep_aspect = FALSE; + pvrvideosink->current_caps = NULL; + pvrvideosink->num_buffers = DEFAULT_QUEUE_SIZE; + pvrvideosink->num_buffers_can_change = TRUE; + pvrvideosink->min_queued_bufs = DEFAULT_MIN_QUEUED_BUFS; + pvrvideosink->dcontext = NULL; + pvrvideosink->xwindow = NULL; + pvrvideosink->redraw_borders = 2; + pvrvideosink->current_buffer = NULL; + pvrvideosink->event_thread = NULL; + 
pvrvideosink->display_par = NULL; + memset (&pvrvideosink->crop, 0, sizeof (PVR2DRECT)); + memset (&pvrvideosink->render_params, 0, sizeof (WSEGLDrawableParams)); +} + +static void +gst_pvrvideosink_base_init (gpointer g_class) +{ + GstElementClass *element_class = GST_ELEMENT_CLASS (g_class); + + gst_element_class_set_details_simple (element_class, + "PVR Video sink", "Sink/Video", + "A PVR videosink", + "Luciana Fujii Pontello <luciana.fujii@collabora.co.uk"); + + gst_element_class_add_pad_template (element_class, + gst_static_pad_template_get (&gst_pvrvideosink_sink_template_factory)); +} + +static void +gst_pvrvideosink_class_init (GstPVRVideoSinkClass * klass) +{ + GObjectClass *gobject_class; + GstElementClass *gstelement_class; + GstBaseSinkClass *gstbasesink_class; + + gobject_class = (GObjectClass *) klass; + gstelement_class = (GstElementClass *) klass; + gstbasesink_class = (GstBaseSinkClass *) klass; + + parent_class = g_type_class_peek_parent (klass); + + gobject_class->finalize = gst_pvrvideosink_finalize; + gobject_class->set_property = gst_pvrvideosink_set_property; + gobject_class->get_property = gst_pvrvideosink_get_property; + + g_object_class_install_property (gobject_class, PROP_FORCE_ASPECT_RATIO, + g_param_spec_boolean ("force-aspect-ratio", "Force aspect ratio", + "When enabled, reverse caps negotiation (scaling) will respect " + "original aspect ratio", FALSE, + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + gstelement_class->change_state = gst_pvrvideosink_change_state; + + gstbasesink_class->set_caps = GST_DEBUG_FUNCPTR (gst_pvrvideosink_setcaps); + gstbasesink_class->get_caps = GST_DEBUG_FUNCPTR (gst_pvrvideosink_getcaps); + gstbasesink_class->buffer_alloc = + GST_DEBUG_FUNCPTR (gst_pvrvideosink_buffer_alloc); + gstbasesink_class->get_times = GST_DEBUG_FUNCPTR (gst_pvrvideosink_get_times); + gstbasesink_class->event = GST_DEBUG_FUNCPTR (gst_pvrvideosink_event); + + gstbasesink_class->render = GST_DEBUG_FUNCPTR 
(gst_pvrvideosink_show_frame); +} + +/* ============================================================= */ +/* */ +/* Public Methods */ +/* */ +/* ============================================================= */ + +/* =========================================== */ +/* */ +/* Object typing & Creation */ +/* */ +/* =========================================== */ + +GType +gst_pvrvideosink_get_type (void) +{ + static GType pvrvideosink_type = 0; + + if (!pvrvideosink_type) { + static const GTypeInfo pvrvideosink_info = { + sizeof (GstPVRVideoSinkClass), + gst_pvrvideosink_base_init, + NULL, + (GClassInitFunc) gst_pvrvideosink_class_init, + NULL, + NULL, + sizeof (GstPVRVideoSink), 0, (GInstanceInitFunc) gst_pvrvideosink_init, + }; + static const GInterfaceInfo iface_info = { + (GInterfaceInitFunc) gst_pvrvideosink_interface_init, NULL, NULL, + }; + static const GInterfaceInfo overlay_info = { + (GInterfaceInitFunc) gst_pvrvideosink_xoverlay_init, NULL, NULL, + }; + static const GInterfaceInfo navigation_info = { + (GInterfaceInitFunc) gst_pvrvideosink_navigation_init, NULL, NULL, + }; + + pvrvideosink_type = g_type_register_static (GST_TYPE_VIDEO_SINK, + "GstPVRVideoSink", &pvrvideosink_info, 0); + + g_type_add_interface_static (pvrvideosink_type, + GST_TYPE_IMPLEMENTS_INTERFACE, &iface_info); + g_type_add_interface_static (pvrvideosink_type, GST_TYPE_X_OVERLAY, + &overlay_info); + g_type_add_interface_static (pvrvideosink_type, GST_TYPE_NAVIGATION, + &navigation_info); + } + + return pvrvideosink_type; +} diff --git a/sys/pvr2d/gstpvrvideosink.h b/sys/pvr2d/gstpvrvideosink.h new file mode 100644 index 0000000..9e59b9a --- /dev/null +++ b/sys/pvr2d/gstpvrvideosink.h @@ -0,0 +1,168 @@ +/* GStreamer + * + * Copyright (C) 2011 - Collabora Ltda + * Copyright (C) 2011 - Texas Instruments + * @author: Luciana Fujii Pontello <luciana.fujii@collabora.co.uk> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser 
General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef __GST_PVRVIDEOSINK_H__ +#define __GST_PVRVIDEOSINK_H__ + +#include <gst/video/gstvideosink.h> +#include <gst/video/video.h> +#include "gstpvrbufferpool.h" + +#include <string.h> +#include <math.h> +#include <pvr2d.h> +#include <EGL/egl.h> +#include <wsegl.h> +#include <X11/Xlib.h> +#include <X11/Xutil.h> + +G_BEGIN_DECLS +#define GST_TYPE_PVRVIDEOSINK (gst_pvrvideosink_get_type()) +#define GST_PVRVIDEOSINK(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_PVRVIDEOSINK, GstPVRVideoSink)) +#define GST_PVRVIDEOSINK_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_PVRVIDEOSINK, GstPVRVideoSinkClass)) +#define GST_IS_PVRVIDEOSINK(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_PVRVIDEOSINK)) +#define GST_IS_PVRVIDEOSINK_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_PVRVIDEOSINK)) +typedef struct _GstDrawContext GstDrawContext; +typedef struct _GstXWindow GstXWindow; + +typedef struct _GstPVRVideoBuffer GstPVRVideoBuffer; +typedef struct _GstPVRVideoBufferClass GstPVRVideoBufferClass; + +typedef struct _GstPVRVideoSink GstPVRVideoSink; +typedef struct _GstPVRVideoSinkClass GstPVRVideoSinkClass; + +struct _GstDrawContext +{ + /* PVR2D */ + PVR2DCONTEXTHANDLE pvr_context; + PVR2DMEMINFO dst_mem; + + long stride; + PVR2DFORMAT display_format; + long display_width; + long display_height; + + gulong 
physical_width; + gulong physical_height; + GValue *par; + + /* WSEGL */ + const WSEGL_FunctionTable *wsegl_table; + + WSEGLDisplayHandle display_handle; + const WSEGLCaps **glcaps; + WSEGLConfig *glconfig; + WSEGLDrawableHandle drawable_handle; + WSEGLRotationAngle rotation; + + GMutex *x_lock; + Display *x_display; + gint screen_num; + gulong black; +}; + +struct _GstXWindow +{ + Window window; + gint width, height; + gboolean internal; + GC gc; +}; + + +/** + * GstPVRVideoSink: + * @running: used to inform @event_thread if it should run/shutdown + * @fps_n: the framerate fraction numerator + * @fps_d: the framerate fraction denominator + * @flow_lock: used to protect data flow routines from external calls such as + * events from @event_thread or methods from the #GstXOverlay interface + * @pool_lock: used to protect the buffer pool + * @x_lock: used to protect X calls + * @buffer_pool: a list of #GstPVRVideoBuffer that could be reused at next buffer + * allocation call + * @keep_aspect: used to remember if reverse negotiation scaling should respect + * aspect ratio + * + * The #GstPVRVideoSink data structure. 
+ */ +struct _GstPVRVideoSink +{ + /* Our element stuff */ + GstVideoSink videosink; + + gboolean running; + + /* Framerate numerator and denominator */ + gint fps_n, fps_d; + /* size of incoming video, used as the size for XvImage */ + guint video_width, video_height; + + GstVideoFormat format; + gint rowstride; + gboolean interlaced; + + GThread *event_thread; + GMutex *flow_lock; + + GMutex *pool_lock; + GstPvrBufferPool *buffer_pool; + gboolean pool_invalid; + gint num_buffers; + gboolean num_buffers_can_change; + gint min_queued_bufs; + + gboolean keep_aspect; + + GstCaps *current_caps; + GstDrawContext *dcontext; + GstXWindow *xwindow; + + GstVideoRectangle render_rect; + gboolean have_render_rect; + + GValue *display_par; + gint video_par_n; + gint video_par_d; + gint display_par_n; + gint display_par_d; + + gchar *media_title; + gint redraw_borders; + GstBuffer *current_buffer; + PVR2DRECT crop; + WSEGLDrawableParams render_params; +}; + +struct _GstPVRVideoSinkClass +{ + GstVideoSinkClass parent_class; +}; + +GType gst_pvrvideosink_get_type (void); + +G_END_DECLS +#endif /* __GST_PVRVIDEOSINK_H__ */ diff --git a/sys/pvr2d/pvr_includes/dri2_ws.h b/sys/pvr2d/pvr_includes/dri2_ws.h new file mode 100644 index 0000000..743d5bb --- /dev/null +++ b/sys/pvr2d/pvr_includes/dri2_ws.h @@ -0,0 +1,176 @@ +/********************************************************************** +* +* Copyright(c) Imagination Technologies Ltd. +* +* The contents of this file are subject to the MIT license as set out below. 
+* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +* OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +* +* This License is also included in this distribution in the file called +* "COPYING". +* +******************************************************************************/ + + + +#if !defined(__DRI2_WS_H__) +#define __DRI2_WS_H__ + +#define DRI2WS_DISPFLAG_DEFAULT_DISPLAY 0x00000001 + +/* +// Constants (macros) related to back-buffering. 
+*/ + +#define XWS_FLIP_BUFFERS 3 +#define DRI2_FLIP_BUFFERS_NUM XWS_FLIP_BUFFERS +#define XWS_FLIP_BUFFER_INDEX (XWS_MAX_FLIP_BUFFERS - 1) + +#define XWS_BLIT_BUFFERS 2 +#define DRI2_BLIT_BUFFERS_NUM XWS_BLIT_BUFFERS +#define XWS_BLIT_BUFFER_INDEX (XWS_MAX_BLIT_BUFFERS - 1) + +#if 0 +#define MIN(a,b) ((a)<(b)?(a):(b)) +#define MAX(a,b) ((a)>(b)?(a):(b)) +#endif + +#define XWS_MAX_BUFFERS MAX(XWS_FLIP_BUFFERS, XWS_BLIT_BUFFERS) +#define DRI2_MAX_BUFFERS_NUM XWS_MAX_BUFFERS + + +#define __DRI_BUFFER_EMPTY 103 + +/** Used for ugly ugly ugly swap interval passing to dri2 driver and receiving current frame index */ +#define __DRI_BUFFER_PVR_CTRL 0x80 /* 100000XX <- last 2 bits for swap interval value */ +#define __DRI_BUFFER_PVR_CTRL_RET 0x90 /* 11000000 */ + + + +#define DRI2_BACK_BUFFER_EXPORT_TYPE_BUFFERS 1 +#define DRI2_BACK_BUFFER_EXPORT_TYPE_SWAPCHAIN 2 + +#define UNREFERENCED_PARAMETER(x) (x) = (x) + + +/* + * Structure used to pass information about back buffers between client application and + * X.Org. Watch out for equivalent structure in pvr_video lib + */ +typedef struct _PVRDRI2BackBuffersExport_ +{ + /* Type of export. 
_BUFFERS mean set of handles, _SWAPCHAIN mean Swap chain ID */ + unsigned int ui32Type; + PVR2D_HANDLE hBuffers[DRI2_MAX_BUFFERS_NUM]; + unsigned int ui32BuffersCount; + unsigned int ui32SwapChainID; +} PVRDRI2BackBuffersExport; + +/* +// Private window system display information +*/ +typedef struct DRI2WS_Display_TAG +{ + unsigned int ui32RefCount; + + Display *display; + int screen; + unsigned int ui32Flags; + + unsigned int ui32Width; + unsigned int ui32Height; + unsigned int ui32StrideInBytes; + unsigned int ui32BytesPerPixel; + WSEGLPixelFormat ePixelFormat; + + PVR2DFORMAT ePVR2DPixelFormat; + PVR2DCONTEXTHANDLE hContext; + PVR2DMEMINFO *psMemInfo; + + int iDRMfd; +} DRI2WSDisplay; + + +typedef enum DRI2WS_DrawableType_TAG +{ + DRI2_DRAWABLE_UNKNOWN = 0, + DRI2_DRAWABLE_WINDOW = 1, + DRI2_DRAWABLE_PIXMAP = 2, +} DRI2WS_DrawableType; + + +/* +// Private window system drawable information +*/ +typedef struct DRI2WS_Drawable_TAG +{ + DRI2WS_DrawableType eDrawableType; + + Window nativeWin; + + /** Index of current render-to back buffer (received from Xserver) */ + unsigned int ui32BackBufferCurrent; + + /** Number of buffers */ + unsigned int ui32BackBufferNum; + + /** Swap interval (works only in fliping/fullscreen case, values 0-3) */ + unsigned int ui32SwapInterval; + + /** PVR2D Handles received from Xserver (back buffers export structure) */ + PVR2D_HANDLE hPVR2DBackBufferExport; + + /** Stamp of current back buffer */ + unsigned char ucBackBufferExportStamp; + + /** Array of PVR2D Handles received from Xserver (our back buffers) */ + PVR2D_HANDLE hPVR2DBackBuffer[XWS_MAX_BUFFERS]; + + /** Array of PVR2D mapped back buffers */ + PVR2DMEMINFO *psMemBackBuffer[XWS_MAX_BUFFERS]; + + /** Stamp of current back buffer */ + unsigned char ucFrontBufferStamp; + + /** Array of PVR2D Handles received from Xserver (our back buffers) */ + PVR2D_HANDLE hPVR2DFrontBuffer; + + /** Array of PVR2D mapped back buffers */ + PVR2DMEMINFO *psMemFrontBuffer; + + /** ID of 
flip/swap chain received from X.Org */ + unsigned int ui32FlipChainID; + + /** PVR2D Handle of flip chain used to get buffers to draw to */ + PVR2DFLIPCHAINHANDLE hFlipChain; + + int iWidth; + int iHeight; + + WSEGLPixelFormat ePixelFormat; + unsigned int ui32BytesPerPixel; + unsigned int ui32StrideInPixels; + unsigned int ui32StrideInBytes; + PVR2DFORMAT ePVR2DPixelFormat; + + DRI2WSDisplay *psXWSDisplay; + +} DRI2WSDrawable; + +#endif /* __DRI2_WS_H__ */ diff --git a/sys/pvr2d/pvr_includes/img_defs.h b/sys/pvr2d/pvr_includes/img_defs.h new file mode 100644 index 0000000..64db711 --- /dev/null +++ b/sys/pvr2d/pvr_includes/img_defs.h @@ -0,0 +1,123 @@ +/********************************************************************** +* +* Copyright(c) Imagination Technologies Ltd. +* +* The contents of this file are subject to the MIT license as set out below. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +* OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+* +* This License is also included in this distribution in the file called +* "COPYING". +* +******************************************************************************/ + + + +#if !defined (__IMG_DEFS_H__) +#define __IMG_DEFS_H__ + +#include "img_types.h" + +typedef enum img_tag_TriStateSwitch +{ + IMG_ON = 0x00, + IMG_OFF, + IMG_IGNORE + +} img_TriStateSwitch, * img_pTriStateSwitch; + +#define IMG_SUCCESS 0 + +#define IMG_NO_REG 1 + +#if defined (NO_INLINE_FUNCS) + #define INLINE + #define FORCE_INLINE +#else +#if defined (__cplusplus) + #define INLINE inline + #define FORCE_INLINE inline +#else +#if !defined(INLINE) + #define INLINE __inline +#endif + #define FORCE_INLINE static __inline +#endif +#endif + + +#ifndef PVR_UNREFERENCED_PARAMETER +#define PVR_UNREFERENCED_PARAMETER(param) (param) = (param) +#endif + +#ifdef __GNUC__ +#define unref__ __attribute__ ((unused)) +#else +#define unref__ +#endif + +#ifndef _TCHAR_DEFINED +#if defined(UNICODE) +typedef unsigned short TCHAR, *PTCHAR, *PTSTR; +#else +typedef char TCHAR, *PTCHAR, *PTSTR; +#endif +#define _TCHAR_DEFINED +#endif + + + #if defined(__linux__) || defined(__METAG) + + #define IMG_CALLCONV + #define IMG_INTERNAL __attribute__((visibility("hidden"))) + #define IMG_EXPORT __attribute__((visibility("default"))) + #define IMG_IMPORT + #define IMG_RESTRICT __restrict__ + + #else + #error("define an OS") + #endif + +#ifndef IMG_ABORT + #define IMG_ABORT() abort() +#endif + +#ifndef IMG_MALLOC + #define IMG_MALLOC(A) malloc (A) +#endif + +#ifndef IMG_FREE + #define IMG_FREE(A) free (A) +#endif + +#define IMG_CONST const + +#if defined(__GNUC__) +#define IMG_FORMAT_PRINTF(x,y) __attribute__((format(printf,x,y))) +#else +#define IMG_FORMAT_PRINTF(x,y) +#endif + +#if defined (_WIN64) +#define IMG_UNDEF (~0ULL) +#else +#define IMG_UNDEF (~0UL) +#endif + +#endif diff --git a/sys/pvr2d/pvr_includes/img_types.h b/sys/pvr2d/pvr_includes/img_types.h new file mode 100644 index 0000000..c312c83 --- /dev/null +++ 
b/sys/pvr2d/pvr_includes/img_types.h @@ -0,0 +1,143 @@ +/********************************************************************** +* +* Copyright(c) Imagination Technologies Ltd. +* +* The contents of this file are subject to the MIT license as set out below. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +* OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +* +* This License is also included in this distribution in the file called +* "COPYING". 
+* +******************************************************************************/ + + + +#ifndef __IMG_TYPES_H__ +#define __IMG_TYPES_H__ + +#if !defined(IMG_ADDRSPACE_CPUVADDR_BITS) +#define IMG_ADDRSPACE_CPUVADDR_BITS 32 +#endif + +#if !defined(IMG_ADDRSPACE_PHYSADDR_BITS) +#define IMG_ADDRSPACE_PHYSADDR_BITS 32 +#endif + +typedef unsigned int IMG_UINT, *IMG_PUINT; +typedef signed int IMG_INT, *IMG_PINT; + +typedef unsigned char IMG_UINT8, *IMG_PUINT8; +typedef unsigned char IMG_BYTE, *IMG_PBYTE; +typedef signed char IMG_INT8, *IMG_PINT8; +typedef char IMG_CHAR, *IMG_PCHAR; + +typedef unsigned short IMG_UINT16, *IMG_PUINT16; +typedef signed short IMG_INT16, *IMG_PINT16; +#if !defined(IMG_UINT32_IS_ULONG) +typedef unsigned int IMG_UINT32, *IMG_PUINT32; +typedef signed int IMG_INT32, *IMG_PINT32; +#else +typedef unsigned long IMG_UINT32, *IMG_PUINT32; +typedef signed long IMG_INT32, *IMG_PINT32; +#endif +#if !defined(IMG_UINT32_MAX) + #define IMG_UINT32_MAX 0xFFFFFFFFUL +#endif + + #if (defined(LINUX) || defined(__METAG)) +#if !defined(USE_CODE) + typedef unsigned long long IMG_UINT64, *IMG_PUINT64; + typedef long long IMG_INT64, *IMG_PINT64; +#endif + #else + + #error("define an OS") + + #endif + +#if !(defined(LINUX) && defined (__KERNEL__)) +typedef float IMG_FLOAT, *IMG_PFLOAT; +typedef double IMG_DOUBLE, *IMG_PDOUBLE; +#endif + +typedef enum tag_img_bool +{ + IMG_FALSE = 0, + IMG_TRUE = 1, + IMG_FORCE_ALIGN = 0x7FFFFFFF +} IMG_BOOL, *IMG_PBOOL; + +typedef void IMG_VOID, *IMG_PVOID; + +typedef IMG_INT32 IMG_RESULT; + +#if defined(_WIN64) +typedef unsigned __int64 IMG_UINTPTR_T; +#else +typedef unsigned int IMG_UINTPTR_T; +#endif + +typedef IMG_PVOID IMG_HANDLE; + +typedef void** IMG_HVOID, * IMG_PHVOID; + +typedef IMG_UINT32 IMG_SIZE_T; + +#define IMG_NULL 0 + +typedef IMG_UINT32 IMG_SID; + + +typedef IMG_PVOID IMG_CPU_VIRTADDR; + +typedef struct _IMG_DEV_VIRTADDR +{ + + IMG_UINT32 uiAddr; +#define IMG_CAST_TO_DEVVADDR_UINT(var) (IMG_UINT32)(var) + +} 
IMG_DEV_VIRTADDR; + +typedef struct _IMG_CPU_PHYADDR +{ + + IMG_UINTPTR_T uiAddr; +} IMG_CPU_PHYADDR; + +typedef struct _IMG_DEV_PHYADDR +{ +#if IMG_ADDRSPACE_PHYSADDR_BITS == 32 + + IMG_UINTPTR_T uiAddr; +#else + IMG_UINT32 uiAddr; + IMG_UINT32 uiHighAddr; +#endif +} IMG_DEV_PHYADDR; + +typedef struct _IMG_SYS_PHYADDR +{ + + IMG_UINTPTR_T uiAddr; +} IMG_SYS_PHYADDR; + +#include "img_defs.h" + +#endif diff --git a/sys/pvr2d/pvr_includes/pvr2d.h b/sys/pvr2d/pvr_includes/pvr2d.h new file mode 100644 index 0000000..d75dd4c --- /dev/null +++ b/sys/pvr2d/pvr_includes/pvr2d.h @@ -0,0 +1,670 @@ +/********************************************************************** +* +* Copyright(c) Imagination Technologies Ltd. +* +* The contents of this file are subject to the MIT license as set out below. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +* OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +* +* This License is also included in this distribution in the file called +* "COPYING". 
+* +******************************************************************************/ + + + +/****************************************************************************** +Modifications :- +$Log: pvr2d.h $ + + --- Revision Logs Removed --- +******************************************************************************/ + +#ifndef _PVR2D_H_ +#define _PVR2D_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* PVR2D Platform-specific definitions */ +#if defined (__linux__) +#define PVR2D_EXPORT __attribute__((visibility("default"))) +#define PVR2D_IMPORT +#else +#define PVR2D_EXPORT +#define PVR2D_IMPORT +#endif + +/* PVR2D header revision */ +#define PVR2D_REV_MAJOR 3 +#define PVR2D_REV_MINOR 5 + +/* Basic types */ +typedef enum +{ + PVR2D_FALSE = 0, + PVR2D_TRUE +} PVR2D_BOOL; + +typedef void* PVR2D_HANDLE; + +typedef char PVR2D_CHAR, *PVR2D_PCHAR; +typedef unsigned char PVR2D_UCHAR, *PVR2D_PUCHAR; +typedef int PVR2D_INT, *PVR2D_PINT; +typedef unsigned int PVR2D_UINT, *PVR2D_PUINT; +typedef long PVR2D_LONG, *PVR2D_PLONG; +typedef unsigned long PVR2D_ULONG, *PVR2D_PULONG; + +typedef void PVR2D_VOID, *PVR2D_PVOID; + + +/* error codes */ +typedef enum +{ + PVR2D_OK = 0, + PVR2DERROR_INVALID_PARAMETER = -1, + PVR2DERROR_DEVICE_UNAVAILABLE = -2, + PVR2DERROR_INVALID_CONTEXT = -3, + PVR2DERROR_MEMORY_UNAVAILABLE = -4, + PVR2DERROR_DEVICE_NOT_PRESENT = -5, + PVR2DERROR_IOCTL_ERROR = -6, + PVR2DERROR_GENERIC_ERROR = -7, + PVR2DERROR_BLT_NOTCOMPLETE = -8, + PVR2DERROR_HW_FEATURE_NOT_SUPPORTED = -9, + PVR2DERROR_NOT_YET_IMPLEMENTED = -10, + PVR2DERROR_MAPPING_FAILED = -11 +}PVR2DERROR; + +/* 32 bit PVR2D pixel format specifier */ +typedef unsigned long PVR2DFORMAT; + +/* Standard PVR2D pixel formats */ +#define PVR2D_1BPP 0x00UL // 1bpp mask surface or palletized 1 bit source with 2x32 bit CLUT +#define PVR2D_RGB565 0x01UL // Common rgb 565 format +#define PVR2D_ARGB4444 0x02UL // Common argb 4444 format +#define PVR2D_RGB888 0x03UL // Common rgb 888 format (not supported) 
+#define PVR2D_ARGB8888 0x04UL // Common argb 8888 format +#define PVR2D_ARGB1555 0x05UL // Common argb 1555 format +#define PVR2D_ALPHA8 0x06UL // Alpha-only 8 bit per pixel (used with a constant fill colour) +#define PVR2D_ALPHA4 0x07UL // Alpha-only 4 bits per pixel (used with a constant fill colour) +#define PVR2D_PAL2 0x08UL // Palletized 2 bit format (requires 4x32 bit CLUT) +#define PVR2D_PAL4 0x09UL // Palletized 4 bit format (requires 16x32 bit CLUT) +#define PVR2D_PAL8 0x0AUL // Palletized 8 bit format (requires 256x32 bit CLUT) +#define PVR2D_U8 0x10UL // monochrome unsigned 8 bit +#define PVR2D_U88 0x11UL // monochrome unsigned 16 bit +#define PVR2D_S8 0x12UL // signed 8 bit +#define PVR2D_YUV422_YUYV 0x13UL // YUV 422 low-high byte order Y0UY1V +#define PVR2D_YUV422_UYVY 0x14UL // YUV 422 low-high byte order UY0VY1 +#define PVR2D_YUV422_YVYU 0x15UL // YUV 422 low-high byte order Y0VY1U +#define PVR2D_YUV422_VYUY 0x16UL // YUV 422 low-high byte order VY0UY1 +#define PVR2D_YUV420_2PLANE 0x17UL // YUV420 2 Plane +#define PVR2D_YUV420_3PLANE 0x18UL // YUV420 3 Plane +#define PVR2D_2101010ARGB 0x19UL // 32 bit 2 10 10 10 +#define PVR2D_888RSGSBS 0x1AUL +#define PVR2D_16BPP_RAW 0x1BUL // 16 bit raw (no format conversion) +#define PVR2D_32BPP_RAW 0x1CUL // 32 bit raw +#define PVR2D_64BPP_RAW 0x1DUL // 64 bit raw +#define PVR2D_128BPP_RAW 0x1EUL // 128 bit raw + +#define PVR2D_NO_OF_FORMATS 0x1FUL + +/* Format modifier bit field (DstFormat and SrcFormat bits 16..23) */ +#define PVR2D_FORMAT_MASK 0x0000FFFFUL // PVR2D Format bits +#define PVR2D_FORMAT_LAYOUT_MASK 0x000F0000UL // Format layout (strided / twiddled / tiled) +#define PVR2D_FORMAT_FLAGS_MASK 0x0FF00000UL // Surface Flags mask + +/* Layout */ +#define PVR2D_FORMAT_LAYOUT_SHIFT 16 +#define PVR2D_FORMAT_LAYOUT_STRIDED 0x00000000UL +#define PVR2D_FORMAT_LAYOUT_TILED 0x00010000UL +#define PVR2D_FORMAT_LAYOUT_TWIDDLED 0x00020000UL + +/* + PVR2D_SURFACE_PDUMP + This flag requests a surface pdump, to 
capture the pixel state after host writes. + Not needed if the surface state has resulted from previous SGX 2D/3D core writes. +*/ +#define PVR2D_SURFACE_PDUMP 0x00100000UL // calls PVRSRVPDumpMem to capture the surface (pdump builds only) + +/* + Low level 3D format extension - for blts via the 3D core only. + If the top bit of the format field is set then PVR2D reads it as a PVRSRV_PIXEL_FORMAT. + The outcome is hardware dependant. + There is no guarantee that any specific PVRSRV format will be supported. +*/ +#define PVR2D_FORMAT_PVRSRV 0x80000000 + +/* wrap surface type */ +typedef enum +{ + PVR2D_WRAPFLAG_NONCONTIGUOUS = 0, + PVR2D_WRAPFLAG_CONTIGUOUS = 1, + +}PVR2DWRAPFLAGS; + +#define PVR2D_CONTEXT_FLAGS_PRIORITY_MASK 0x00000003 + +#define PVR2D_CONTEXT_FLAGS_LOW_PRIORITY_CONTEXT 1 +#define PVR2D_CONTEXT_FLAGS_NORMAL_PRIORITY_CONTEXT 0 +#define PVR2D_CONTEXT_FLAGS_HIGH_PRIORITY_CONTEXT 2 + +/* flags for control information of additional blits */ +typedef enum +{ + PVR2D_BLIT_DISABLE_ALL = 0x00000000, /* disable all additional controls */ + PVR2D_BLIT_CK_ENABLE = 0x00000001, /* enable colour key */ + PVR2D_BLIT_GLOBAL_ALPHA_ENABLE = 0x00000002, /* enable standard global alpha */ + PVR2D_BLIT_PERPIXEL_ALPHABLEND_ENABLE = 0x00000004, /* enable per-pixel alpha bleding */ + PVR2D_BLIT_PAT_SURFACE_ENABLE = 0x00000008, /* enable pattern surf (disable fill) */ + PVR2D_BLIT_FULLY_SPECIFIED_ALPHA_ENABLE = 0x00000010, /* enable fully specified alpha */ + PVR2D_BLIT_ROT_90 = 0x00000020, /* apply 90 degree rotation to the blt */ + PVR2D_BLIT_ROT_180 = 0x00000040, /* apply 180 degree rotation to the blt */ + PVR2D_BLIT_ROT_270 = 0x00000080, /* apply 270 degree rotation to the blt */ + PVR2D_BLIT_COPYORDER_TL2BR = 0x00000100, /* copy order overrides */ + PVR2D_BLIT_COPYORDER_BR2TL = 0x00000200, + PVR2D_BLIT_COPYORDER_TR2BL = 0x00000400, + PVR2D_BLIT_COPYORDER_BL2TR = 0x00000800, + PVR2D_BLIT_COLKEY_SOURCE = 0x00001000, /* Key colour is on the source surface */ + 
PVR2D_BLIT_COLKEY_DEST = 0x00002000, /* Key colour is on the destination surface */ + PVR2D_BLIT_COLKEY_MASKED = 0x00004000, /* Mask enabled for colour key */ + PVR2D_BLIT_COLKEY_OP_PASS = 0x00008000, /* Colour key op = pass */ + PVR2D_BLIT_COLKEY_OP_REJECT = 0x00010000, /* Colour key op = reject */ + PVR2D_BLIT_PATH_2DCORE = 0x00100000, /* Blt via dedicated 2D Core or PTLA */ + PVR2D_BLIT_PATH_3DCORE = 0x00200000, /* Blt via 3D Core */ + PVR2D_BLIT_PATH_SWBLT = 0x00400000, /* Blt via host software */ + PVR2D_BLIT_NO_SRC_SYNC_INFO = 0x00800000, /* Dont send a source sync info*/ + PVR2D_BLIT_ISSUE_STATUS_UPDATES = 0x01000000, /* Issue status updates */ + +} PVR2DBLITFLAGS; + +/* standard alpha-blending functions, AlphaBlendingFunc field of PVR2DBLTINFO */ +typedef enum +{ + PVR2D_ALPHA_OP_SRC_DSTINV = 1, /* source alpha : Cdst = Csrc*Asrc + Cdst*(1-Asrc) */ + PVR2D_ALPHA_OP_SRCP_DSTINV = 2 /* premultiplied source alpha : Cdst = Csrc + Cdst*(1-Asrc) */ +} PVR2D_ALPHABLENDFUNC; + +/* blend ops for fully specified alpha (SGX 2D Core only) */ +typedef enum +{ + PVR2D_BLEND_OP_ZERO = 0, + PVR2D_BLEND_OP_ONE = 1, + PVR2D_BLEND_OP_SRC = 2, + PVR2D_BLEND_OP_DST = 3, + PVR2D_BLEND_OP_GLOBAL = 4, + PVR2D_BLEND_OP_SRC_PLUS_GLOBAL = 5, + PVR2D_BLEND_OP_DST_PLUS_GLOBAL = 6 +}PVR2D_BLEND_OP; + +/* SGX 2D Core Fully specified alpha blend : pAlpha field of PVR2DBLTINFO structure */ +/* a fully specified Alpha Blend operation is defined as */ +/* DST (ALPHA) = (ALPHA_1 * SRC (ALPHA)) + (ALPHA_3 * DST (ALPHA)) */ +/* DST (RGB) = (ALPHA_2 * SRC (RGB)) + (ALPHA_4 * DST (RGB)) */ +/* if the pre-multiplication stage is enabled then the equations become the following: */ +/* PRE_MUL = ((SRC(A)) * (Global Alpha Value)) */ +/* DST (ALPHA) = (ALPHA_1 * SRC (ALPHA)) + (PRE_MUL * DST (ALPHA)) */ +/* DST (RGB) = (ALPHA_2 * SRC (RGB)) + (PRE_MUL * DST (RGB)) */ +/* if the transparent source alpha stage is enabled then a source alpha of zero forces the */ +/* source to be transparent for that 
pixel regardless of the blend equation being used. */ +typedef struct _PVR2D_ALPHABLT +{ + PVR2D_BLEND_OP eAlpha1; + PVR2D_BOOL bAlpha1Invert; + PVR2D_BLEND_OP eAlpha2; + PVR2D_BOOL bAlpha2Invert; + PVR2D_BLEND_OP eAlpha3; + PVR2D_BOOL bAlpha3Invert; + PVR2D_BLEND_OP eAlpha4; + PVR2D_BOOL bAlpha4Invert; + PVR2D_BOOL bPremulAlpha; /* enable pre-multiplication stage */ + PVR2D_BOOL bTransAlpha; /* enable transparent source alpha stage */ + PVR2D_BOOL bUpdateAlphaLookup; /* enable and update the 1555-Lookup alpha table */ + PVR2D_UCHAR uAlphaLookup0; /* 8 bit alpha when A=0 in a 1555-Lookup surface */ + PVR2D_UCHAR uAlphaLookup1; /* 8 bit alpha when A=1 in a 1555-Lookup surface */ + PVR2D_UCHAR uGlobalRGB; /* Global Alpha Value for RGB, 0=transparent 255=opaque */ + PVR2D_UCHAR uGlobalA; /* Global Alpha Value for Alpha */ + +} PVR2D_ALPHABLT, *PPVR2D_ALPHABLT; + + +/* surface memory info structure */ +typedef struct _PVR2DMEMINFO +{ + PVR2D_VOID *pBase; + PVR2D_ULONG ui32MemSize; + PVR2D_ULONG ui32DevAddr; + PVR2D_ULONG ulFlags; + PVR2D_VOID *hPrivateData; + PVR2D_VOID *hPrivateMapData; + +}PVR2DMEMINFO, *PPVR2DMEMINFO; + + +#define PVR2D_MAX_DEVICE_NAME 20 + +typedef struct _PVR2DDEVICEINFO +{ + PVR2D_ULONG ulDevID; + PVR2D_CHAR szDeviceName[PVR2D_MAX_DEVICE_NAME]; +}PVR2DDEVICEINFO; + + +typedef struct _PVR2DISPLAYINFO +{ + PVR2D_ULONG ulMaxFlipChains; + PVR2D_ULONG ulMaxBuffersInChain; + PVR2DFORMAT eFormat; + PVR2D_ULONG ulWidth; + PVR2D_ULONG ulHeight; + PVR2D_LONG lStride; + PVR2D_ULONG ulMinFlipInterval; + PVR2D_ULONG ulMaxFlipInterval; + +}PVR2DDISPLAYINFO; + + +typedef struct _PVR2MISCDISPLAYINFO +{ + PVR2D_ULONG ulPhysicalWidthmm; + PVR2D_ULONG ulPhysicalHeightmm; + PVR2D_ULONG ulUnused[10]; + +}PVR2DMISCDISPLAYINFO; + + +typedef struct _PVR2DBLTINFO +{ + PVR2D_ULONG CopyCode; /* rop code */ + PVR2D_ULONG Colour; /* fill colour */ + PVR2D_ULONG ColourKey; /* colour key argb8888 (see CKEY_ defs below) */ + PVR2D_UCHAR GlobalAlphaValue; /* global alpha 
blending */ + PVR2D_UCHAR AlphaBlendingFunc; /* per-pixel alpha-blending function */ + + PVR2DBLITFLAGS BlitFlags; /* additional blit control information */ + + PVR2DMEMINFO *pDstMemInfo; /* destination memory */ + PVR2D_ULONG DstOffset; /* byte offset from start of allocation to destination surface pixel 0,0 */ + PVR2D_LONG DstStride; /* signed stride, the number of bytes from pixel 0,0 to 0,1 */ + PVR2D_LONG DstX, DstY; /* pixel offset from start of dest surface to start of blt rectangle */ + PVR2D_LONG DSizeX,DSizeY; /* blt size */ + PVR2DFORMAT DstFormat; /* dest format */ + PVR2D_ULONG DstSurfWidth; /* size of dest surface in pixels */ + PVR2D_ULONG DstSurfHeight; /* size of dest surface in pixels */ + + PVR2DMEMINFO *pSrcMemInfo; /* source mem, (source fields are also used for patterns) */ + PVR2D_ULONG SrcOffset; /* byte offset from start of allocation to src/pat surface pixel 0,0 */ + PVR2D_LONG SrcStride; /* signed stride, the number of bytes from pixel 0,0 to 0,1 */ + PVR2D_LONG SrcX, SrcY; /* pixel offset from start of surface to start of source rectangle */ + /* for patterns this is the start offset within the pattern */ + PVR2D_LONG SizeX,SizeY; /* source rectangle size or pattern size in pixels */ + PVR2DFORMAT SrcFormat; /* source/pattern format */ + PVR2DMEMINFO *pPalMemInfo; /* source/pattern palette memory containing argb8888 colour table */ + PVR2D_ULONG PalOffset; /* byte offset from start of allocation to start of palette */ + PVR2D_ULONG SrcSurfWidth; /* size of source surface in pixels */ + PVR2D_ULONG SrcSurfHeight; /* size of source surface in pixels */ + + PVR2DMEMINFO *pMaskMemInfo; /* mask memory, 1bpp format implied */ + PVR2D_ULONG MaskOffset; /* byte offset from start of allocation to mask surface pixel 0,0 */ + PVR2D_LONG MaskStride; /* signed stride, the number of bytes from pixel 0,0 to 0,1 */ + PVR2D_LONG MaskX, MaskY; /* mask rect top left (mask size = blt size) */ + PVR2D_ULONG MaskSurfWidth; /* size of mask surface in pixels */ 
+ PVR2D_ULONG MaskSurfHeight; /* size of mask surface in pixels */ + + PPVR2D_ALPHABLT pAlpha; /* fully specified alpha blend (2DCore only) */ + + PVR2D_ULONG uSrcChromaPlane1; /* mem offset from start of source alloc to chroma plane 1 */ + PVR2D_ULONG uSrcChromaPlane2; /* mem offset from start of source alloc to chroma plane 2 */ + PVR2D_ULONG uDstChromaPlane1; /* mem offset from start of dest alloc to chroma plane 1 */ + PVR2D_ULONG uDstChromaPlane2; /* mem offset from start of dest alloc to chroma plane 2 */ + + PVR2D_ULONG ColourKeyMask; /* 32 bit colour key mask, only valid when PVR2D_BLIT_COLKEY_MASKED is set */ + +}PVR2DBLTINFO, *PPVR2DBLTINFO; + +typedef struct _PVR2DRECT +{ + PVR2D_LONG left, top; + PVR2D_LONG right, bottom; +} PVR2DRECT; + +typedef struct +{ + PVR2DMEMINFO *pSurfMemInfo; /* surface memory */ + PVR2D_ULONG SurfOffset; /* byte offset from start of allocation to destination surface pixel 0,0 */ + PVR2D_LONG Stride; /* signed stride */ + PVR2DFORMAT Format; /* format */ + PVR2D_ULONG SurfWidth; /* surface width in pixels */ + PVR2D_ULONG SurfHeight; /* surface height in pixels */ + +} PVR2D_SURFACE, *PPVR2D_SURFACE; + +typedef struct +{ + PVR2D_ULONG uChromaPlane1; /* YUV multiplane - byte offset from start of alloc to chroma plane 1 */ + PVR2D_ULONG uChromaPlane2; /* YUV multiplane - byte offset from start of alloc to chroma plane 2 */ + PVR2D_LONG Reserved[2]; /* Reserved, must be zero */ + +} PVR2D_SURFACE_EXT, *PPVR2D_SURFACE_EXT; + +typedef struct +{ + PVR2D_ULONG *pUseCode; /* USSE code */ + PVR2D_ULONG UseCodeSize; /* usse code size in bytes */ + +} PVR2D_USECODE, *PPVR2D_USECODE; + +typedef struct +{ + PVR2D_SURFACE sDst; /* destination surface */ + PVR2D_SURFACE sSrc; /* source surface */ + PVR2DRECT rcDest; /* destination rectangle */ + PVR2DRECT rcSource; /* source rectangle */ + PVR2D_HANDLE hUseCode; /* custom USE code (NULL implies source copy) */ + PVR2D_ULONG UseParams[2]; /* per-blt params for use code */ + +} PVR2D_3DBLT, 
*PPVR2D_3DBLT; + +typedef struct +{ + PVR2D_SURFACE sDst; /* destination surface */ + PVR2DRECT rcDest; /* destination rectangle; scaling is supported */ + PVR2D_SURFACE sSrc; /* source surface */ + PVR2DRECT rcSource; /* source rectangle; scaling is supported */ + PPVR2D_SURFACE pSrc2; /* optional second source surface (NULL if not required) */ + PVR2DRECT* prcSource2; /* optional pSrc2 rectangle */ + PVR2D_HANDLE hUseCode; /* custom USSE shader code (NULL implies default source copy) */ + PVR2D_ULONG UseParams[2]; /* per-blt params for usse code */ + PVR2D_ULONG uiNumTemporaryRegisters; /* no. of temporary registers used in custom shader code */ + PVR2D_BOOL bDisableDestInput; /* set true if the destination is output only */ + PPVR2D_SURFACE_EXT pDstExt; /* Extended format params for dest */ + PPVR2D_SURFACE_EXT pSrcExt[2]; /* Extended format params for source 1 and 2 */ + PVR2D_BOOL bFilter; /* set true to enable smoothing */ + PVR2D_LONG Reserved[4]; /* Reserved, must be zero */ + +} PVR2D_3DBLT_EXT, *PPVR2D_3DBLT_EXT; + + +#define MAKE_COPY_BLIT(src,soff,dest,doff,sx,sy,dx,dy,sz) + +typedef void* PVR2DCONTEXTHANDLE; +typedef void* PVR2DFLIPCHAINHANDLE; + + +// CopyCode field of PVR2DBLTINFO structure: +// the CopyCode field of the PVR2DBLTINFO structure should contain a rop3 or rop4 code. +// a rop3 is an 8 bit code that describes a blt with three inputs : source dest and pattern +// rop4 is a 16 bit code that describes a blt with four inputs : source dest pattern and mask +// common rop3 codes are defined below +// a colour fill blt is processed in the pattern channel as a constant colour with a rop code of 0xF0 +// PVR2D_BLIT_PAT_SURFACE_ENABLE defines whether the pattern channel is a surface or a fill colour. +// a rop4 is defined by two rop3 codes, and the 1 bit-per-pixel mask surface defines which is used. +// a common rop4 is 0xAAF0 which is the mask copy blt used for text glyphs. 
+// CopyCode is taken to be a rop4 when pMaskMemInfo is non zero, otherwise it is assumed to be a rop3 +// use the PVR2DMASKROP4 macro below to construct a rop4 from two rop3's +// rop3a is the rop used when mask pixel = 1, and rop3b when mask = 0 +#define PVR2DROP4(rop3b, rop3a) ((rop3b<<8)|rop3a) + +/* common rop codes */ +#define PVR2DROPclear 0x00 /* 0 (whiteness) */ +#define PVR2DROPset 0xFF /* 1 (blackness) */ +#define PVR2DROPnoop 0xAA /* dst (used for masked blts) */ + +/* source and dest rop codes */ +#define PVR2DROPand 0x88 /* src AND dst */ +#define PVR2DROPandReverse 0x44 /* src AND NOT dst */ +#define PVR2DROPcopy 0xCC /* src (used for source copy and alpha blts) */ +#define PVR2DROPandInverted 0x22 /* NOT src AND dst */ +#define PVR2DROPxor 0x66 /* src XOR dst */ +#define PVR2DROPor 0xEE /* src OR dst */ +#define PVR2DROPnor 0x11 /* NOT src AND NOT dst */ +#define PVR2DROPequiv 0x99 /* NOT src XOR dst */ +#define PVR2DROPinvert 0x55 /* NOT dst */ +#define PVR2DROPorReverse 0xDD /* src OR NOT dst */ +#define PVR2DROPcopyInverted 0x33 /* NOT src */ +#define PVR2DROPorInverted 0xBB /* NOT src OR dst */ +#define PVR2DROPnand 0x77 /* NOT src OR NOT dst */ + +/* pattern rop codes */ +#define PVR2DPATROPand 0xA0 /* pat AND dst */ +#define PVR2DPATROPandReverse 0x50 /* pat AND NOT dst */ +#define PVR2DPATROPcopy 0xF0 /* pat (used for solid color fills and pattern blts) */ +#define PVR2DPATROPandInverted 0x0A /* NOT pat AND dst */ +#define PVR2DPATROPxor 0x5A /* pat XOR dst */ +#define PVR2DPATROPor 0xFA /* pat OR dst */ +#define PVR2DPATROPnor 0x05 /* NOT pat AND NOT dst */ +#define PVR2DPATROPequiv 0xA5 /* NOT pat XOR dst */ +#define PVR2DPATROPinvert 0x55 /* NOT dst */ +#define PVR2DPATROPorReverse 0xF5 /* pat OR NOT dst */ +#define PVR2DPATROPcopyInverted 0x0F /* NOT pat */ +#define PVR2DPATROPorInverted 0xAF /* NOT pat OR dst */ +#define PVR2DPATROPnand 0x5F /* NOT pat OR NOT dst */ + +/* common rop4 codes */ +#define PVR2DROP4MaskedCopy 
PVR2DROP4(PVR2DROPnoop,PVR2DROPcopy) /* masked source copy blt (used for rounded window corners etc) */ +#define PVR2DROP4MaskedFill PVR2DROP4(PVR2DROPnoop,PVR2DPATROPcopy) /* masked colour fill blt (used for text) */ + +/* Legacy support */ +#define PVR2DROP3_PATMASK PVR2DPATROPcopy +#define PVR2DROP3_SRCMASK PVR2DROPcopy + +/* pixmap memory alignment */ +#define PVR2D_ALIGNMENT_4 4 /* DWORD alignment */ +#define PVR2D_ALIGNMENT_ANY 0 /* no alignment */ +#define PVR2D_ALIGNMENT_PALETTE 16 /* 16 byte alignment is required for palettes */ + +/* Heap number for PVR2DGetFrameBuffer */ +#define PVR2D_FB_PRIMARY_SURFACE 0 + +#define PVR2D_PRESENT_PROPERTY_SRCSTRIDE (1UL << 0) +#define PVR2D_PRESENT_PROPERTY_DSTSIZE (1UL << 1) +#define PVR2D_PRESENT_PROPERTY_DSTPOS (1UL << 2) +#define PVR2D_PRESENT_PROPERTY_CLIPRECTS (1UL << 3) +#define PVR2D_PRESENT_PROPERTY_INTERVAL (1UL << 4) + +#define PVR2D_CREATE_FLIPCHAIN_SHARED (1UL << 0) +#define PVR2D_CREATE_FLIPCHAIN_QUERY (1UL << 1) +#define PVR2D_CREATE_FLIPCHAIN_OEMOVERLAY (1UL << 2) +#define PVR2D_CREATE_FLIPCHAIN_AS_BLITCHAIN (1UL << 3) + +/* Colour-key colour must be translated into argb8888 format */ +#define CKEY_8888(P) (P) +#define CKEY_4444(P) (((P&0xF000UL)<<16) | ((P&0x0F00UL)<<12) | ((P&0x00F0UL)<<8) | ((P&0x000FUL)<<4)) +#define CKEY_1555(P) (((P&0x8000UL)<<16) | ((P&0x7C00UL)<<9) | ((P&0x3E0UL)<<6) | ((P&0x1FUL)<<3)) +#define CKEY_565(P) (((P&0xF800UL)<<8) | ((P&0x7E0UL)<<5) | ((P&0x1FUL)<<3)) +#define CKEY_MASK_8888 0x00FFFFFFUL +#define CKEY_MASK_4444 0x00F0F0F0UL +#define CKEY_MASK_1555 0x00F8F8F8UL /* Alpha is not normally included in the key test */ +#define CKEY_MASK_565 0x00F8FCF8UL + +/* Fill colours must be translated into argb8888 format */ +#define CFILL_4444(P) (((P&0xF000UL)<<16) | ((P&0x0F00UL)<<12) | ((P&0x00F0UL)<<8) | ((P&0x000FUL)<<4)) +#define CFILL_1555(P) (((P&0x8000UL)<<16) | ((P&0x7C00UL)<<9) | ((P&0x3E0UL)<<6) | ((P&0x1FUL)<<3)) +#define CFILL_565(P) (((P&0xF800UL)<<8) | ((P&0x7E0UL)<<5) 
| ((P&0x1FUL)<<3)) + +/* PVR2DCreateDeviceContext flags */ +#define PVR2D_XSERVER_PROC 0x00000001UL /*!< Set for the Xserver connection */ + +/* PVR2DMemAlloc flags */ +#define PVR2D_MEM_UNCACHED 0x00000000UL /* Default */ +#define PVR2D_MEM_CACHED 0x00000001UL /* Caller must flush and sync when necessary */ +#define PVR2D_MEM_WRITECOMBINE 0x00000002UL + +/* Functions that the library exports */ + +PVR2D_IMPORT +int PVR2DEnumerateDevices(PVR2DDEVICEINFO *pDevInfo); + +PVR2D_IMPORT +PVR2DERROR PVR2DCreateDeviceContext(PVR2D_ULONG ulDevID, + PVR2DCONTEXTHANDLE* phContext, + PVR2D_ULONG ulFlags); + +PVR2D_IMPORT +PVR2DERROR PVR2DDestroyDeviceContext(PVR2DCONTEXTHANDLE hContext); + +PVR2D_IMPORT +PVR2DERROR PVR2DGetDeviceInfo(PVR2DCONTEXTHANDLE hContext, + PVR2DDISPLAYINFO *pDisplayInfo); + +PVR2D_IMPORT +PVR2DERROR PVR2DGetMiscDisplayInfo(PVR2DCONTEXTHANDLE hContext, + PVR2DMISCDISPLAYINFO *pMiscDisplayInfo); + +PVR2D_IMPORT +PVR2DERROR PVR2DGetScreenMode(PVR2DCONTEXTHANDLE hContext, + PVR2DFORMAT *pFormat, + PVR2D_LONG *plWidth, + PVR2D_LONG *plHeight, + PVR2D_LONG *plStride, + PVR2D_INT *piRefreshRate); + +PVR2D_IMPORT +PVR2DERROR PVR2DGetFrameBuffer(PVR2DCONTEXTHANDLE hContext, + PVR2D_INT nHeap, + PVR2DMEMINFO **ppsMemInfo); + +PVR2D_IMPORT +PVR2DERROR PVR2DMemAlloc(PVR2DCONTEXTHANDLE hContext, + PVR2D_ULONG ulBytes, + PVR2D_ULONG ulAlign, + PVR2D_ULONG ulFlags, + PVR2DMEMINFO **ppsMemInfo); + +PVR2D_IMPORT +PVR2DERROR PVR2DMemExport(PVR2DCONTEXTHANDLE hContext, + PVR2D_ULONG ulFlags, + PVR2DMEMINFO *psMemInfo, + PVR2D_HANDLE *phMemHandle); + +PVR2D_IMPORT +PVR2DERROR PVR2DMemWrap(PVR2DCONTEXTHANDLE hContext, + PVR2D_VOID *pMem, + PVR2D_ULONG ulFlags, + PVR2D_ULONG ulBytes, + PVR2D_ULONG alPageAddress[], + PVR2DMEMINFO **ppsMemInfo); + +PVR2D_IMPORT +PVR2DERROR PVR2DMemMap(PVR2DCONTEXTHANDLE hContext, + PVR2D_ULONG ulFlags, + PVR2D_HANDLE hMemHandle, + PVR2DMEMINFO **ppsDstMem); + +PVR2D_IMPORT +PVR2DERROR PVR2DMemFree(PVR2DCONTEXTHANDLE hContext, + PVR2DMEMINFO 
*psMemInfo); + +PVR2D_IMPORT +PVR2DERROR PVR2DBlt(PVR2DCONTEXTHANDLE hContext, + PVR2DBLTINFO *pBltInfo); + +PVR2D_IMPORT +PVR2DERROR PVR2DBltClipped(PVR2DCONTEXTHANDLE hContext, + PVR2DBLTINFO *pBltInfo, + PVR2D_ULONG ulNumClipRects, + PVR2DRECT *pClipRects); + +PVR2D_EXPORT +PVR2DERROR PVR2DSet1555Alpha (PVR2DCONTEXTHANDLE hContext, + PVR2D_UCHAR Alpha0, PVR2D_UCHAR Alpha1); + +PVR2D_IMPORT +PVR2DERROR PVR2DQueryBlitsComplete(PVR2DCONTEXTHANDLE hContext, + const PVR2DMEMINFO *pMemInfo, + PVR2D_UINT uiWaitForComplete); + +PVR2D_IMPORT +PVR2DERROR PVR2DSetPresentBltProperties(PVR2DCONTEXTHANDLE hContext, + PVR2D_ULONG ulPropertyMask, + PVR2D_LONG lSrcStride, + PVR2D_ULONG ulDstWidth, + PVR2D_ULONG ulDstHeight, + PVR2D_LONG lDstXPos, + PVR2D_LONG lDstYPos, + PVR2D_ULONG ulNumClipRects, + PVR2DRECT *pClipRects, + PVR2D_ULONG ulSwapInterval); + +PVR2D_IMPORT +PVR2DERROR PVR2DPresentBlt(PVR2DCONTEXTHANDLE hContext, + PVR2DMEMINFO *pMemInfo, + PVR2D_LONG lRenderID); + +PVR2D_IMPORT +PVR2DERROR PVR2DCreateFlipChain(PVR2DCONTEXTHANDLE hContext, + PVR2D_ULONG ulFlags, + PVR2D_ULONG ulNumBuffers, + PVR2D_ULONG ulWidth, + PVR2D_ULONG ulHeight, + PVR2DFORMAT eFormat, + PVR2D_LONG *plStride, + PVR2D_ULONG *pulFlipChainID, + PVR2DFLIPCHAINHANDLE *phFlipChain); + +PVR2D_IMPORT +PVR2DERROR PVR2DDestroyFlipChain(PVR2DCONTEXTHANDLE hContext, + PVR2DFLIPCHAINHANDLE hFlipChain); + +PVR2D_IMPORT +PVR2DERROR PVR2DGetFlipChainBuffers(PVR2DCONTEXTHANDLE hContext, + PVR2DFLIPCHAINHANDLE hFlipChain, + PVR2D_ULONG *pulNumBuffers, + PVR2DMEMINFO *psMemInfo[]); + +PVR2D_IMPORT +PVR2DERROR PVR2DSetPresentFlipProperties(PVR2DCONTEXTHANDLE hContext, + PVR2DFLIPCHAINHANDLE hFlipChain, + PVR2D_ULONG ulPropertyMask, + PVR2D_LONG lDstXPos, + PVR2D_LONG lDstYPos, + PVR2D_ULONG ulNumClipRects, + PVR2DRECT *pClipRects, + PVR2D_ULONG ulSwapInterval); + +PVR2D_IMPORT +PVR2DERROR PVR2DPresentFlip(PVR2DCONTEXTHANDLE hContext, + PVR2DFLIPCHAINHANDLE hFlipChain, + PVR2DMEMINFO *psMemInfo, + PVR2D_LONG 
lRenderID); + +PVR2D_IMPORT +PVR2DERROR PVR2DGetAPIRev(PVR2D_LONG *lRevMajor, PVR2D_LONG *lRevMinor); + +PVR2D_IMPORT +PVR2DERROR PVR2DLoadUseCode (const PVR2DCONTEXTHANDLE hContext, const PVR2D_UCHAR *pUseCode, + const PVR2D_ULONG UseCodeSize, PVR2D_HANDLE *pUseCodeHandle); +PVR2D_IMPORT +PVR2DERROR PVR2DFreeUseCode (const PVR2DCONTEXTHANDLE hContext, const PVR2D_HANDLE hUseCodeHandle); + +PVR2D_IMPORT +PVR2DERROR PVR2DBlt3D (const PVR2DCONTEXTHANDLE hContext, const PPVR2D_3DBLT pBlt3D); + +PVR2D_IMPORT +PVR2DERROR PVR2DBlt3DExt (const PVR2DCONTEXTHANDLE hContext, const PPVR2D_3DBLT_EXT pBlt3D); + +#ifdef __cplusplus +} +#endif + +#endif /* _PVR2D_H_ */ + +/****************************************************************************** + End of file (pvr2d.h) +******************************************************************************/ diff --git a/sys/pvr2d/pvr_includes/services.h b/sys/pvr2d/pvr_includes/services.h new file mode 100644 index 0000000..93263a8 --- /dev/null +++ b/sys/pvr2d/pvr_includes/services.h @@ -0,0 +1,1211 @@ +/********************************************************************** + * + * Copyright (C) Imagination Technologies Ltd. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful but, except + * as otherwise stated in writing, without any warranty; without even the + * implied warranty of merchantability or fitness for a particular purpose. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Imagination Technologies Ltd. <gpl-support@imgtec.com> + * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK + * + ******************************************************************************/ + +#ifndef __SERVICES_H__ +#define __SERVICES_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" +#include "servicesext.h" + +#define PVRSRV_4K_PAGE_SIZE 4096UL + +#define PVRSRV_MAX_CMD_SIZE 1024 + +#define PVRSRV_MAX_DEVICES 16 + +#define EVENTOBJNAME_MAXLENGTH (50) + +#define PVRSRV_MEM_READ (1U<<0) +#define PVRSRV_MEM_WRITE (1U<<1) +#define PVRSRV_MEM_CACHE_CONSISTENT (1U<<2) +#define PVRSRV_MEM_NO_SYNCOBJ (1U<<3) +#define PVRSRV_MEM_INTERLEAVED (1U<<4) +#define PVRSRV_MEM_DUMMY (1U<<5) +#define PVRSRV_MEM_EDM_PROTECT (1U<<6) +#define PVRSRV_MEM_ZERO (1U<<7) +#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1U<<8) +#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1U<<9) +#define PVRSRV_MEM_NO_RESMAN (1U<<10) +#define PVRSRV_MEM_EXPORTED (1U<<11) + + +#define PVRSRV_HAP_CACHED (1U<<12) +#define PVRSRV_HAP_UNCACHED (1U<<13) +#define PVRSRV_HAP_SMART (1U<<20) /* XXX could we use CACHED|UNCACHED? 
*/ +#define PVRSRV_HAP_WRITECOMBINE (1U<<14) +#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_SMART|PVRSRV_HAP_WRITECOMBINE) +#define PVRSRV_HAP_KERNEL_ONLY (1U<<15) +#define PVRSRV_HAP_SINGLE_PROCESS (1U<<16) +#define PVRSRV_HAP_MULTI_PROCESS (1U<<17) +#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1U<<18) +#define PVRSRV_HAP_NO_CPU_VIRTUAL (1U<<19) +#define PVRSRV_HAP_GPU_PAGEABLE (1U<<21) +#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY \ + |PVRSRV_HAP_SINGLE_PROCESS \ + |PVRSRV_HAP_MULTI_PROCESS \ + |PVRSRV_HAP_FROM_EXISTING_PROCESS \ + |PVRSRV_HAP_NO_CPU_VIRTUAL\ + |PVRSRV_HAP_GPU_PAGEABLE) + +#define PVRSRV_MEM_CACHED PVRSRV_HAP_CACHED +#define PVRSRV_MEM_UNCACHED PVRSRV_HAP_UNCACHED +#define PVRSRV_MEM_SMART PVRSRV_HAP_SMART +#define PVRSRV_MEM_WRITECOMBINE PVRSRV_HAP_WRITECOMBINE + +#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT (24) + +#define PVRSRV_MAP_NOUSERVIRTUAL (1UL<<27) +#define PVRSRV_MEM_XPROC (1U<<28) + +#define PVRSRV_NO_CONTEXT_LOSS 0 +#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1 +#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80 + + +#define PVRSRV_DEFAULT_DEV_COOKIE (1) + + +#define PVRSRV_MISC_INFO_TIMER_PRESENT (1U<<0) +#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1U<<1) +#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1U<<2) +#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1U<<3) +#define PVRSRV_MISC_INFO_DDKVERSION_PRESENT (1U<<4) +#define PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT (1U<<5) +#define PVRSRV_MISC_INFO_FREEMEM_PRESENT (1U<<6) + +#define PVRSRV_MISC_INFO_RESET_PRESENT (1U<<31) + +#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20 +#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200 + + +#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001 + +#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001 +#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002 + +#define PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC 0x00000001 +#define PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC 0x00000002 + +#define SRV_FLAGS_PERSIST 0x1 +#define 
SRV_FLAGS_PDUMP_ACTIVE 0x2 + +#define PVRSRV_PDUMP_FLAGS_CONTINUOUS 0x1 + +#define PVR_FULL_CACHE_OP_THRESHOLD (0x7D000) + +typedef enum _PVRSRV_DEVICE_TYPE_ +{ + PVRSRV_DEVICE_TYPE_UNKNOWN = 0 , + PVRSRV_DEVICE_TYPE_MBX1 = 1 , + PVRSRV_DEVICE_TYPE_MBX1_LITE = 2 , + + PVRSRV_DEVICE_TYPE_M24VA = 3, + PVRSRV_DEVICE_TYPE_MVDA2 = 4, + PVRSRV_DEVICE_TYPE_MVED1 = 5, + PVRSRV_DEVICE_TYPE_MSVDX = 6, + + PVRSRV_DEVICE_TYPE_SGX = 7, + + PVRSRV_DEVICE_TYPE_VGX = 8, + + + PVRSRV_DEVICE_TYPE_EXT = 9, + + PVRSRV_DEVICE_TYPE_LAST = 9, + + PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff + +} PVRSRV_DEVICE_TYPE; + +#define HEAP_ID( _dev_ , _dev_heap_idx_ ) ( ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1)) ) +#define HEAP_IDX( _heap_id_ ) ( (_heap_id_)&((1<<24) - 1 ) ) +#define HEAP_DEV( _heap_id_ ) ( (_heap_id_)>>24 ) + +#define PVRSRV_UNDEFINED_HEAP_ID (~0LU) + +typedef enum +{ + IMG_EGL = 0x00000001, + IMG_OPENGLES1 = 0x00000002, + IMG_OPENGLES2 = 0x00000003, + IMG_D3DM = 0x00000004, + IMG_SRV_UM = 0x00000005, + IMG_OPENVG = 0x00000006, + IMG_SRVCLIENT = 0x00000007, + IMG_VISTAKMD = 0x00000008, + IMG_VISTA3DNODE = 0x00000009, + IMG_VISTAMVIDEONODE = 0x0000000A, + IMG_VISTAVPBNODE = 0x0000000B, + IMG_OPENGL = 0x0000000C, + IMG_D3D = 0x0000000D, +#if defined(SUPPORT_GRAPHICS_HAL) || defined(SUPPORT_COMPOSER_HAL) + IMG_ANDROID_HAL = 0x0000000E, +#endif +#if defined(SUPPORT_OPENCL) + IMG_OPENCL = 0x0000000F, +#endif + +} IMG_MODULE_ID; + + +#define APPHINT_MAX_STRING_SIZE 256 + +typedef enum +{ + IMG_STRING_TYPE = 1, + IMG_FLOAT_TYPE , + IMG_UINT_TYPE , + IMG_INT_TYPE , + IMG_FLAG_TYPE +}IMG_DATA_TYPE; + + +typedef struct _PVRSRV_DEV_DATA_ *PPVRSRV_DEV_DATA; + +typedef struct _PVRSRV_DEVICE_IDENTIFIER_ +{ + PVRSRV_DEVICE_TYPE eDeviceType; + PVRSRV_DEVICE_CLASS eDeviceClass; + IMG_UINT32 ui32DeviceIndex; + IMG_CHAR *pszPDumpDevName; + IMG_CHAR *pszPDumpRegName; + +} PVRSRV_DEVICE_IDENTIFIER; + + +typedef struct _PVRSRV_CLIENT_DEV_DATA_ +{ + IMG_UINT32 ui32NumDevices; + 
PVRSRV_DEVICE_IDENTIFIER asDevID[PVRSRV_MAX_DEVICES]; + PVRSRV_ERROR (*apfnDevConnect[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA); + PVRSRV_ERROR (*apfnDumpTrace[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA); + +} PVRSRV_CLIENT_DEV_DATA; + + +typedef struct _PVRSRV_CONNECTION_ +{ + IMG_HANDLE hServices; + IMG_UINT32 ui32ProcessID; + PVRSRV_CLIENT_DEV_DATA sClientDevData; + IMG_UINT32 ui32SrvFlags; +}PVRSRV_CONNECTION; + + +typedef struct _PVRSRV_DEV_DATA_ +{ + IMG_CONST PVRSRV_CONNECTION *psConnection; +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevCookie; +#else + IMG_HANDLE hDevCookie; +#endif + +} PVRSRV_DEV_DATA; + +typedef struct _PVRSRV_MEMUPDATE_ +{ + IMG_UINT32 ui32UpdateAddr; + IMG_UINT32 ui32UpdateVal; +} PVRSRV_MEMUPDATE; + +typedef struct _PVRSRV_HWREG_ +{ + IMG_UINT32 ui32RegAddr; + IMG_UINT32 ui32RegVal; +} PVRSRV_HWREG; + +typedef struct _PVRSRV_MEMBLK_ +{ + IMG_DEV_VIRTADDR sDevVirtAddr; + IMG_HANDLE hOSMemHandle; + IMG_HANDLE hOSWrapMem; + IMG_HANDLE hBuffer; + IMG_HANDLE hResItem; + IMG_SYS_PHYADDR *psIntSysPAddr; + +} PVRSRV_MEMBLK; + +typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO; + +typedef struct _PVRSRV_CLIENT_MEM_INFO_ +{ + + IMG_PVOID pvLinAddr; + + + IMG_PVOID pvLinAddrKM; + + + IMG_DEV_VIRTADDR sDevVAddr; + + + + + + + IMG_CPU_PHYADDR sCpuPAddr; + + + IMG_UINT32 ui32Flags; + + + + + IMG_UINT32 ui32ClientFlags; + + + IMG_SIZE_T uAllocSize; + + + + struct _PVRSRV_CLIENT_SYNC_INFO_ *psClientSyncInfo; + +#if defined (SUPPORT_SID_INTERFACE) + + IMG_SID hMappingInfo; + + + IMG_SID hKernelMemInfo; + + + IMG_SID hResItem; +#else + + IMG_HANDLE hMappingInfo; + + + IMG_HANDLE hKernelMemInfo; + + + IMG_HANDLE hResItem; +#endif + +#if defined(SUPPORT_MEMINFO_IDS) + #if !defined(USE_CODE) + + IMG_UINT64 ui64Stamp; + #else + IMG_UINT32 dummy1; + IMG_UINT32 dummy2; + #endif +#endif + + + + + struct _PVRSRV_CLIENT_MEM_INFO_ *psNext; + +} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO; + + +#define PVRSRV_MAX_CLIENT_HEAPS (32) +typedef struct 
_PVRSRV_HEAP_INFO_ +{ + IMG_UINT32 ui32HeapID; +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemHeap; +#else + IMG_HANDLE hDevMemHeap; +#endif + IMG_DEV_VIRTADDR sDevVAddrBase; + IMG_UINT32 ui32HeapByteSize; + IMG_UINT32 ui32Attribs; + IMG_UINT32 ui32XTileStride; +}PVRSRV_HEAP_INFO; + + + + +typedef struct _PVRSRV_EVENTOBJECT_ +{ + + IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH]; + +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hOSEventKM; +#else + IMG_HANDLE hOSEventKM; +#endif + +} PVRSRV_EVENTOBJECT; + +typedef enum +{ + PVRSRV_MISC_INFO_CPUCACHEOP_NONE = 0, + PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN, + PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH, + PVRSRV_MISC_INFO_CPUCACHEOP_CUSTOM_FLUSH, + PVRSRV_MISC_INFO_CPUCACHEOP_CUSTOM_INV +} PVRSRV_MISC_INFO_CPUCACHEOP_TYPE; + +typedef struct _PVRSRV_MISC_INFO_ +{ + IMG_UINT32 ui32StateRequest; + IMG_UINT32 ui32StatePresent; + + + IMG_VOID *pvSOCTimerRegisterKM; + IMG_VOID *pvSOCTimerRegisterUM; +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hSOCTimerRegisterOSMemHandle; + IMG_SID hSOCTimerRegisterMappingInfo; +#else + IMG_HANDLE hSOCTimerRegisterOSMemHandle; + IMG_HANDLE hSOCTimerRegisterMappingInfo; +#endif + + + IMG_VOID *pvSOCClockGateRegs; + IMG_UINT32 ui32SOCClockGateRegsSize; + + + IMG_CHAR *pszMemoryStr; + IMG_UINT32 ui32MemoryStrLen; + + + PVRSRV_EVENTOBJECT sGlobalEventObject; +#if defined (SUPPORT_SID_INTERFACE) + IMG_EVENTSID hOSGlobalEvent; +#else + IMG_HANDLE hOSGlobalEvent; +#endif + + + IMG_UINT32 aui32DDKVersion[4]; + + + struct + { + + IMG_BOOL bDeferOp; + + + PVRSRV_MISC_INFO_CPUCACHEOP_TYPE eCacheOpType; + + +#if !defined (SUPPORT_SID_INTERFACE) + union + { + + PVRSRV_CLIENT_MEM_INFO *psClientMemInfo; + + + struct _PVRSRV_KERNEL_MEM_INFO_ *psKernelMemInfo; + } u; +#endif + + + IMG_VOID *pvBaseVAddr; + + + IMG_UINT32 ui32Length; + } sCacheOpCtl; +} PVRSRV_MISC_INFO; + +typedef struct _PVRSRV_SYNC_TOKEN_ +{ + + + struct + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelSyncInfo; +#else + IMG_HANDLE 
hKernelSyncInfo; +#endif + IMG_UINT32 ui32ReadOpsPendingSnapshot; + IMG_UINT32 ui32WriteOpsPendingSnapshot; + } sPrivate; +} PVRSRV_SYNC_TOKEN; + + +typedef enum _PVRSRV_CLIENT_EVENT_ +{ + PVRSRV_CLIENT_EVENT_HWTIMEOUT = 0, +} PVRSRV_CLIENT_EVENT; + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVClientEvent(IMG_CONST PVRSRV_CLIENT_EVENT eEvent, + PVRSRV_DEV_DATA *psDevData, + IMG_PVOID pvData); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION **ppsConnection, IMG_UINT32 ui32SrvFlags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(IMG_CONST PVRSRV_CONNECTION *psConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_UINT32 *puiNumDevices, + PVRSRV_DEVICE_IDENTIFIER *puiDevIDs); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_UINT32 uiDevIndex, + PVRSRV_DEV_DATA *psDevData, + PVRSRV_DEVICE_TYPE eDeviceType); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo); + +#if 1 +IMG_IMPORT +IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +IMG_IMPORT +IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); + +IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs); +#endif + +IMG_IMPORT +PVRSRV_ERROR PVRSRVPollForValue ( const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hOSEvent, +#else + IMG_HANDLE hOSEvent, +#endif + volatile IMG_UINT32 *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Waitus, + IMG_UINT32 ui32Tries); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA 
*psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID *phDevMemContext, +#else + IMG_HANDLE *phDevMemContext, +#endif + IMG_UINT32 *pui32SharedHeapCount, + PVRSRV_HEAP_INFO *psHeapInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemContext); +#else + IMG_HANDLE hDevMemContext); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemContext, +#else + IMG_HANDLE hDevMemContext, +#endif + IMG_UINT32 *pui32SharedHeapCount, + PVRSRV_HEAP_INFO *psHeapInfo); + +#if defined(PVRSRV_LOG_MEMORY_ALLOCS) + #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \ + (PVR_TRACE(("PVRSRVAllocDeviceMem(" #psDevData "," #hDevMemHeap "," #ui32Attribs "," #ui32Size "," #ui32Alignment "," #ppsMemInfo ")" \ + ": " logStr " (size = 0x%lx)", ui32Size)), \ + PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo)) +#else + #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \ + PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo) +#endif + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemHeap, +#else + IMG_HANDLE hDevMemHeap, +#endif + IMG_UINT32 ui32Attribs, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVRemapToDev(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV 
PVRSRVUnmapFromDev(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID *phMemInfo); +#else + IMG_HANDLE *phMemInfo); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemHeap, +#else + IMG_HANDLE hDevMemHeap, +#endif + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelMemInfo, + IMG_SID hDstDevMemHeap, +#else + IMG_HANDLE hKernelMemInfo, + IMG_HANDLE hDstDevMemHeap, +#endif + PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_UINT32 ui32Flags); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Flags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST PVRSRV_DEV_DATA *psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemContext, +#else + IMG_HANDLE hDevMemContext, +#endif + IMG_SIZE_T ui32ByteSize, + IMG_SIZE_T ui32PageOffset, + IMG_BOOL bPhysContig, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_VOID *pvLinAddr, + IMG_UINT32 ui32Flags, + PVRSRV_CLIENT_MEM_INFO 
**ppsMemInfo); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psClientMemInfo, + IMG_UINT32 ui32Attribs); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemContext, + IMG_SID hDeviceClassBuffer, +#else + IMG_HANDLE hDevMemContext, + IMG_HANDLE hDeviceClassBuffer, +#endif + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_SYS_PHYADDR sSysPhysAddr, + IMG_UINT32 uiSizeInBytes, + IMG_PVOID *ppvUserAddr, + IMG_UINT32 *puiActualSize, + IMG_PVOID *ppvProcess); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_PVOID pvUserAddr, + IMG_PVOID pvProcess); + +#if defined(LINUX) +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem2(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_INT *iFd); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory2(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_INT iFd, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDstDevMemHeap, +#else + IMG_HANDLE hDstDevMemHeap, +#endif + PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo); +#endif + +typedef enum _PVRSRV_SYNCVAL_MODE_ +{ + PVRSRV_SYNCVAL_READ = IMG_TRUE, + PVRSRV_SYNCVAL_WRITE = IMG_FALSE, + +} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE; + +typedef IMG_UINT32 PVRSRV_SYNCVAL; + +IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo, + PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired); + +IMG_IMPORT PVRSRV_ERROR 
PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo, + PVRSRV_SYNCVAL_MODE eMode); + +IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo, + PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired); + +IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo, + PVRSRV_SYNCVAL_MODE eMode); + +IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo, + PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired); + +IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo, + PVRSRV_SYNCVAL_MODE eMode); + +IMG_IMPORT PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo, + PVRSRV_SYNCVAL_MODE eMode); + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_CONST PVRSRV_CONNECTION *psConnection, + PVRSRV_DEVICE_CLASS DeviceClass, + IMG_UINT32 *pui32DevCount, + IMG_UINT32 *pui32DevID); + +IMG_IMPORT +IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_UINT32 ui32DeviceID); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, IMG_HANDLE hDevice); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats (IMG_HANDLE hDevice, + IMG_UINT32 *pui32Count, + DISPLAY_FORMAT *psFormat); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims (IMG_HANDLE hDevice, + IMG_UINT32 *pui32Count, + DISPLAY_FORMAT *psFormat, + DISPLAY_DIMS *psDims); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID *phBuffer); +#else + IMG_HANDLE *phBuffer); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice, + DISPLAY_INFO* psDisplayInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain (IMG_HANDLE hDevice, + IMG_UINT32 ui32Flags, + DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib, + DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib, + IMG_UINT32 ui32BufferCount, + 
IMG_UINT32 ui32OEMFlags, + IMG_UINT32 *pui32SwapChainID, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID *phSwapChain); +#else + IMG_HANDLE *phSwapChain); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain (IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hSwapChain); +#else + IMG_HANDLE hSwapChain); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect (IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hSwapChain, +#else + IMG_HANDLE hSwapChain, +#endif + IMG_RECT *psDstRect); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect (IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hSwapChain, +#else + IMG_HANDLE hSwapChain, +#endif + IMG_RECT *psSrcRect); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey (IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hSwapChain, +#else + IMG_HANDLE hSwapChain, +#endif + IMG_UINT32 ui32CKColour); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey (IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hSwapChain, +#else + IMG_HANDLE hSwapChain, +#endif + IMG_UINT32 ui32CKColour); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers(IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hSwapChain, + IMG_SID *phBuffer); +#else + IMG_HANDLE hSwapChain, + IMG_HANDLE *phBuffer); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer (IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hBuffer, +#else + IMG_HANDLE hBuffer, +#endif + IMG_UINT32 ui32ClipRectCount, + IMG_RECT *psClipRect, + IMG_UINT32 ui32SwapInterval, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hPrivateTag); +#else + IMG_HANDLE hPrivateTag); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem (IMG_HANDLE hDevice, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hSwapChain); +#else + IMG_HANDLE hSwapChain); +#endif + + +IMG_IMPORT +IMG_HANDLE 
IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_UINT32 ui32DeviceID); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_HANDLE hDevice); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice, + BUFFER_INFO *psBuffer); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice, + IMG_UINT32 ui32BufferIndex, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID *phBuffer); +#else + IMG_HANDLE *phBuffer); +#endif + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_CONST PVRSRV_CONNECTION *psConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStartInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStopInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_CONST PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelSyncInfo, +#else + PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo, +#endif + IMG_BOOL bIsRead, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol2(IMG_CONST PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelSyncInfo, +#else + PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo, +#endif + IMG_BOOL bIsRead); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_PVOID pvAltLinAddr, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_PVOID pvAltLinAddr, + PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_CHAR *pszRegRegion, + IMG_UINT32 
ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Flags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(const PVRSRV_DEV_DATA *psDevData, + IMG_CHAR *pszRegRegion, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(const PVRSRV_DEV_DATA *psDevData, + IMG_CHAR *pszRegRegion, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_CONST PVRSRV_CONNECTION *psConnection, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_DEV_PHYADDR sPDDevPAddr); + +#if !defined(USE_CODE) +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_DEV_DATA *psDevData, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelMemInfo, +#else + IMG_HANDLE hKernelMemInfo, +#endif + IMG_DEV_PHYADDR *pPages, + IMG_UINT32 ui32NumPages, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32Length, + IMG_UINT32 ui32Flags); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_UINT32 ui32Frame); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_CONST IMG_CHAR *pszComment, + IMG_BOOL bContinuous); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_BOOL bContinuous, + IMG_CONST IMG_CHAR *pszFormat, ...) +#if !defined(USE_CODE) + IMG_FORMAT_PRINTF(3, 4) +#endif +; + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_UINT32 ui32Flags, + IMG_CONST IMG_CHAR *pszFormat, ...) 
+#if !defined(USE_CODE) + IMG_FORMAT_PRINTF(3, 4) +#endif +; + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_CHAR *pszString, + IMG_BOOL bContinuous); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_BOOL *pbIsCapturing); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_CONST IMG_CHAR *pszRegRegion, + IMG_CONST IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags); + + +IMG_IMPORT +IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_UINT32 ui32RegOffset, + IMG_BOOL bLastFrame); + +IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(const IMG_CHAR *pszLibraryName); +IMG_IMPORT PVRSRV_ERROR PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv); +IMG_IMPORT PVRSRV_ERROR PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, const IMG_CHAR *pszFunctionName, IMG_VOID **ppvFuncAddr); + +IMG_IMPORT IMG_UINT32 PVRSRVClockus (void); +IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus); +IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void); +IMG_IMPORT IMG_UINT32 IMG_CALLCONV PVRSRVGetCurrentProcessID(void); +IMG_IMPORT IMG_CHAR * IMG_CALLCONV PVRSRVSetLocale(const IMG_CHAR *pszLocale); + + + + + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID, + const IMG_CHAR *pszAppName, + IMG_VOID **ppvState); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID, + IMG_VOID *pvHintState); + +IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID *pvHintState, + const IMG_CHAR *pszHintName, + IMG_DATA_TYPE eDataType, + const IMG_VOID *pvDefault, + IMG_VOID *pvReturn); + +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_SIZE_T ui32Size); 
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_SIZE_T ui32Size); +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uNewSize); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem); +IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T ui32Size); +IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size); + +struct _PVRSRV_MUTEX_OPAQUE_STRUCT_; +typedef struct _PVRSRV_MUTEX_OPAQUE_STRUCT_ *PVRSRV_MUTEX_HANDLE; + +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex); +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex); + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockProcessGlobalMutex(void); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockProcessGlobalMutex(void); + + +struct _PVRSRV_SEMAPHORE_OPAQUE_STRUCT_; +typedef struct _PVRSRV_SEMAPHORE_OPAQUE_STRUCT_ *PVRSRV_SEMAPHORE_HANDLE; + + + #define IMG_SEMAPHORE_WAIT_INFINITE ((IMG_UINT64)0xFFFFFFFFFFFFFFFFull) + + +#if !defined(USE_CODE) + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVCreateSemaphore) +#endif +static INLINE PVRSRV_ERROR PVRSRVCreateSemaphore(PVRSRV_SEMAPHORE_HANDLE *phSemaphore, IMG_INT iInitialCount) +{ + PVR_UNREFERENCED_PARAMETER(iInitialCount); + *phSemaphore = 0; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVDestroySemaphore) +#endif +static INLINE PVRSRV_ERROR PVRSRVDestroySemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore) +{ + PVR_UNREFERENCED_PARAMETER(hSemaphore); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVWaitSemaphore) +#endif +static INLINE PVRSRV_ERROR PVRSRVWaitSemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore, IMG_UINT64 ui64TimeoutMicroSeconds) +{ + 
PVR_UNREFERENCED_PARAMETER(hSemaphore); + PVR_UNREFERENCED_PARAMETER(ui64TimeoutMicroSeconds); + return PVRSRV_ERROR_INVALID_PARAMS; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPostSemaphore) +#endif +static INLINE IMG_VOID PVRSRVPostSemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore, IMG_INT iPostCount) +{ + PVR_UNREFERENCED_PARAMETER(hSemaphore); + PVR_UNREFERENCED_PARAMETER(iPostCount); +} + +#endif + + +#if (defined(DEBUG) && defined(__linux__)) +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber); + +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber); + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem); + +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_SIZE_T ui32NewSize, + IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber); +#endif + +IMG_IMPORT PVRSRV_ERROR PVRSRVEventObjectWait(const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_EVENTSID hOSEvent); +#else + IMG_HANDLE hOSEvent); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateSyncInfoModObj(const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID *phKernelSyncInfoModObj); +#else + IMG_HANDLE *phKernelSyncInfoModObj); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroySyncInfoModObj(const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelSyncInfoModObj); +#else + IMG_HANDLE hKernelSyncInfoModObj); +#endif + + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyPendingSyncOps(const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelSyncInfoModObj, +#else + IMG_HANDLE hKernelSyncInfoModObj, +#endif + PVRSRV_CLIENT_SYNC_INFO *psSyncInfo, + IMG_UINT32 ui32ModifyFlags, + IMG_UINT32 *pui32ReadOpsPending, + IMG_UINT32 
*pui32WriteOpsPending); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyCompleteSyncOps(const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelSyncInfoModObj); +#else + IMG_HANDLE hKernelSyncInfoModObj); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsTakeToken(const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + const IMG_SID hKernelSyncInfo, +#else + const PVRSRV_CLIENT_SYNC_INFO *psSyncInfo, +#endif + PVRSRV_SYNC_TOKEN *psSyncToken); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToToken(const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + const IMG_SID hKernelSyncInfo, +#else + const PVRSRV_CLIENT_SYNC_INFO *psSyncInfo, +#endif + const PVRSRV_SYNC_TOKEN *psSyncToken, + IMG_BOOL bWait); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToModObj(const PVRSRV_CONNECTION *psConnection, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelSyncInfoModObj, +#else + IMG_HANDLE hKernelSyncInfoModObj, +#endif + IMG_BOOL bWait); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToDelta(const PVRSRV_CONNECTION *psConnection, + PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo, + IMG_UINT32 ui32Delta, + IMG_BOOL bWait); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_SYNC_INFO **ppsSyncInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_SYNC_INFO *psSyncInfo); + +IMG_IMPORT +const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError); + + +#define TIME_NOT_PASSED_UINT32(a,b,c) (((a) - (b)) < (c)) + +#if defined (__cplusplus) +} +#endif +#endif + diff --git a/sys/pvr2d/pvr_includes/servicesext.h b/sys/pvr2d/pvr_includes/servicesext.h new file mode 100644 index 0000000..d326245 --- /dev/null +++ b/sys/pvr2d/pvr_includes/servicesext.h @@ -0,0 +1,855 @@ 
+/********************************************************************** +* +* Copyright(c) Imagination Technologies Ltd. +* +* The contents of this file are subject to the MIT license as set out below. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +* OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +* +* This License is also included in this distribution in the file called +* "COPYING". 
+* +******************************************************************************/ + + + +#if !defined (__SERVICESEXT_H__) +#define __SERVICESEXT_H__ + +#define PVRSRV_LOCKFLG_READONLY (1) + +typedef enum _PVRSRV_ERROR_ +{ + PVRSRV_OK = 0, + PVRSRV_ERROR_OUT_OF_MEMORY, + PVRSRV_ERROR_TOO_FEW_BUFFERS, + PVRSRV_ERROR_INVALID_PARAMS, + PVRSRV_ERROR_INIT_FAILURE, + PVRSRV_ERROR_CANT_REGISTER_CALLBACK, + PVRSRV_ERROR_INVALID_DEVICE, + PVRSRV_ERROR_NOT_OWNER, + PVRSRV_ERROR_BAD_MAPPING, + PVRSRV_ERROR_TIMEOUT, + PVRSRV_ERROR_FLIP_CHAIN_EXISTS, + PVRSRV_ERROR_INVALID_SWAPINTERVAL, + PVRSRV_ERROR_SCENE_INVALID, + PVRSRV_ERROR_STREAM_ERROR, + PVRSRV_ERROR_FAILED_DEPENDENCIES, + PVRSRV_ERROR_CMD_NOT_PROCESSED, + PVRSRV_ERROR_CMD_TOO_BIG, + PVRSRV_ERROR_DEVICE_REGISTER_FAILED, + PVRSRV_ERROR_TOOMANYBUFFERS, + PVRSRV_ERROR_NOT_SUPPORTED, + PVRSRV_ERROR_PROCESSING_BLOCKED, + + PVRSRV_ERROR_CANNOT_FLUSH_QUEUE, + PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE, + PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS, + PVRSRV_ERROR_RETRY, + + PVRSRV_ERROR_DDK_VERSION_MISMATCH, + PVRSRV_ERROR_BUILD_MISMATCH, + PVRSRV_ERROR_CORE_REVISION_MISMATCH, + + PVRSRV_ERROR_UPLOAD_TOO_BIG, + + PVRSRV_ERROR_INVALID_FLAGS, + PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS, + + PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY, + PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR, + PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED, + + PVRSRV_ERROR_BRIDGE_CALL_FAILED, + PVRSRV_ERROR_IOCTL_CALL_FAILED, + + PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND, + PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND, + PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT, + + PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND, + PVRSRV_ERROR_PCI_CALL_FAILED, + PVRSRV_ERROR_PCI_REGION_TOO_SMALL, + PVRSRV_ERROR_PCI_REGION_UNAVAILABLE, + PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH, + + PVRSRV_ERROR_REGISTER_BASE_NOT_SET, + + PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM, + PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY, + PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC, + PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR, + + PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY, + 
PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY, + + PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES, + PVRSRV_ERROR_FAILED_TO_FREE_PAGES, + PVRSRV_ERROR_FAILED_TO_COPY_PAGES, + PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES, + PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES, + PVRSRV_ERROR_STILL_MAPPED, + PVRSRV_ERROR_MAPPING_NOT_FOUND, + PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT, + PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE, + + PVRSRV_ERROR_INVALID_SEGMENT_BLOCK, + PVRSRV_ERROR_INVALID_SGXDEVDATA, + PVRSRV_ERROR_INVALID_DEVINFO, + PVRSRV_ERROR_INVALID_MEMINFO, + PVRSRV_ERROR_INVALID_MISCINFO, + PVRSRV_ERROR_UNKNOWN_IOCTL, + PVRSRV_ERROR_INVALID_CONTEXT, + PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT, + PVRSRV_ERROR_INVALID_HEAP, + PVRSRV_ERROR_INVALID_KERNELINFO, + PVRSRV_ERROR_UNKNOWN_POWER_STATE, + PVRSRV_ERROR_INVALID_HANDLE_TYPE, + PVRSRV_ERROR_INVALID_WRAP_TYPE, + PVRSRV_ERROR_INVALID_PHYS_ADDR, + PVRSRV_ERROR_INVALID_CPU_ADDR, + PVRSRV_ERROR_INVALID_HEAPINFO, + PVRSRV_ERROR_INVALID_PERPROC, + PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO, + PVRSRV_ERROR_INVALID_MAP_REQUEST, + PVRSRV_ERROR_INVALID_UNMAP_REQUEST, + PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP, + PVRSRV_ERROR_MAPPING_STILL_IN_USE, + + PVRSRV_ERROR_EXCEEDED_HW_LIMITS, + PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED, + + PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA, + PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT, + PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT, + PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT, + PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT, + PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD, + PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD, + PVRSRV_ERROR_THREAD_READ_ERROR, + PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER, + PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR, + PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR, + PVRSRV_ERROR_ISR_ALREADY_INSTALLED, + PVRSRV_ERROR_ISR_NOT_INSTALLED, + PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT, + PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO, + PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT, + PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES, + PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT, + 
PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE, + + PVRSRV_ERROR_INVALID_CCB_COMMAND, + + PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE, + PVRSRV_ERROR_INVALID_LOCK_ID, + PVRSRV_ERROR_RESOURCE_NOT_LOCKED, + + PVRSRV_ERROR_FLIP_FAILED, + PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED, + + PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE, + + PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED, + PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG, + PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG, + PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG, + + PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID, + + PVRSRV_ERROR_BLIT_SETUP_FAILED, + + PVRSRV_ERROR_PDUMP_NOT_AVAILABLE, + PVRSRV_ERROR_PDUMP_BUFFER_FULL, + PVRSRV_ERROR_PDUMP_BUF_OVERFLOW, + PVRSRV_ERROR_PDUMP_NOT_ACTIVE, + PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES, + + PVRSRV_ERROR_MUTEX_DESTROY_FAILED, + PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR, + + PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE, + PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND, + + PVRSRV_ERROR_PROCESS_NOT_INITIALISED, + PVRSRV_ERROR_PROCESS_NOT_FOUND, + PVRSRV_ERROR_SRV_CONNECT_FAILED, + PVRSRV_ERROR_SRV_DISCONNECT_FAILED, + PVRSRV_ERROR_DEINT_PHASE_FAILED, + PVRSRV_ERROR_INIT2_PHASE_FAILED, + + PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE, + + PVRSRV_ERROR_NO_DC_DEVICES_FOUND, + PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE, + PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE, + PVRSRV_ERROR_NO_DEVICEDATA_FOUND, + PVRSRV_ERROR_NO_DEVICENODE_FOUND, + PVRSRV_ERROR_NO_CLIENTNODE_FOUND, + PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE, + + PVRSRV_ERROR_UNABLE_TO_INIT_TASK, + PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK, + PVRSRV_ERROR_UNABLE_TO_KILL_TASK, + + PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER, + PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER, + PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER, + + PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT, + PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION, + + PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE, + PVRSRV_ERROR_HANDLE_NOT_ALLOCATED, + PVRSRV_ERROR_HANDLE_TYPE_MISMATCH, + PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE, + PVRSRV_ERROR_HANDLE_NOT_SHAREABLE, + PVRSRV_ERROR_HANDLE_NOT_FOUND, + 
PVRSRV_ERROR_INVALID_SUBHANDLE, + PVRSRV_ERROR_HANDLE_BATCH_IN_USE, + PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE, + + PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE, + PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED, + + PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE, + PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP, + + PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE, + + PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE, + PVRSRV_ERROR_INVALID_DEVICEID, + PVRSRV_ERROR_DEVICEID_NOT_FOUND, + + PVRSRV_ERROR_MEMORY_TEST_FAILED, + PVRSRV_ERROR_CPUPADDR_TEST_FAILED, + PVRSRV_ERROR_COPY_TEST_FAILED, + + PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED, + + PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK, + PVRSRV_ERROR_CLOCK_REQUEST_FAILED, + PVRSRV_ERROR_DISABLE_CLOCK_FAILURE, + PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE, + PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE, + PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK, + PVRSRV_ERROR_UNABLE_TO_GET_CLOCK, + PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK, + PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK, + + PVRSRV_ERROR_UNKNOWN_SGL_ERROR, + + PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE, + PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE, + + PVRSRV_ERROR_BAD_SYNC_STATE, + + PVRSRV_ERROR_CACHEOP_FAILED, + + PVRSRV_ERROR_FORCE_I32 = 0x7fffffff + +} PVRSRV_ERROR; + + +typedef enum _PVRSRV_DEVICE_CLASS_ +{ + PVRSRV_DEVICE_CLASS_3D = 0 , + PVRSRV_DEVICE_CLASS_DISPLAY = 1 , + PVRSRV_DEVICE_CLASS_BUFFER = 2 , + PVRSRV_DEVICE_CLASS_VIDEO = 3 , + + PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff + +} PVRSRV_DEVICE_CLASS; + + +typedef enum _PVRSRV_SYS_POWER_STATE_ +{ + PVRSRV_SYS_POWER_STATE_Unspecified = -1, + PVRSRV_SYS_POWER_STATE_D0 = 0, + PVRSRV_SYS_POWER_STATE_D1 = 1, + PVRSRV_SYS_POWER_STATE_D2 = 2, + PVRSRV_SYS_POWER_STATE_D3 = 3, + PVRSRV_SYS_POWER_STATE_D4 = 4, + + PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff + +} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE; + + +typedef enum _PVRSRV_DEV_POWER_STATE_ +{ + PVRSRV_DEV_POWER_STATE_DEFAULT = -1, + PVRSRV_DEV_POWER_STATE_ON = 0, + PVRSRV_DEV_POWER_STATE_IDLE = 1, + 
PVRSRV_DEV_POWER_STATE_OFF = 2, + + PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff + +} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE; + + +typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); +typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); +typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + + +typedef enum _PVRSRV_PIXEL_FORMAT_ { + + PVRSRV_PIXEL_FORMAT_UNKNOWN = 0, + PVRSRV_PIXEL_FORMAT_RGB565 = 1, + PVRSRV_PIXEL_FORMAT_RGB555 = 2, + PVRSRV_PIXEL_FORMAT_RGB888 = 3, + PVRSRV_PIXEL_FORMAT_BGR888 = 4, + PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8, + PVRSRV_PIXEL_FORMAT_PAL12 = 13, + PVRSRV_PIXEL_FORMAT_PAL8 = 14, + PVRSRV_PIXEL_FORMAT_PAL4 = 15, + PVRSRV_PIXEL_FORMAT_PAL2 = 16, + PVRSRV_PIXEL_FORMAT_PAL1 = 17, + PVRSRV_PIXEL_FORMAT_ARGB1555 = 18, + PVRSRV_PIXEL_FORMAT_ARGB4444 = 19, + PVRSRV_PIXEL_FORMAT_ARGB8888 = 20, + PVRSRV_PIXEL_FORMAT_ABGR8888 = 21, + PVRSRV_PIXEL_FORMAT_YV12 = 22, + PVRSRV_PIXEL_FORMAT_I420 = 23, + PVRSRV_PIXEL_FORMAT_IMC2 = 25, + PVRSRV_PIXEL_FORMAT_XRGB8888 = 26, + PVRSRV_PIXEL_FORMAT_XBGR8888 = 27, + PVRSRV_PIXEL_FORMAT_BGRA8888 = 28, + PVRSRV_PIXEL_FORMAT_XRGB4444 = 29, + PVRSRV_PIXEL_FORMAT_ARGB8332 = 30, + PVRSRV_PIXEL_FORMAT_A2RGB10 = 31, + PVRSRV_PIXEL_FORMAT_A2BGR10 = 32, + PVRSRV_PIXEL_FORMAT_P8 = 33, + PVRSRV_PIXEL_FORMAT_L8 = 34, + PVRSRV_PIXEL_FORMAT_A8L8 = 35, + PVRSRV_PIXEL_FORMAT_A4L4 = 36, + PVRSRV_PIXEL_FORMAT_L16 = 37, + PVRSRV_PIXEL_FORMAT_L6V5U5 = 38, + PVRSRV_PIXEL_FORMAT_V8U8 = 39, + PVRSRV_PIXEL_FORMAT_V16U16 = 40, + PVRSRV_PIXEL_FORMAT_QWVU8888 = 41, + PVRSRV_PIXEL_FORMAT_XLVU8888 = 42, + 
PVRSRV_PIXEL_FORMAT_QWVU16 = 43, + PVRSRV_PIXEL_FORMAT_D16 = 44, + PVRSRV_PIXEL_FORMAT_D24S8 = 45, + PVRSRV_PIXEL_FORMAT_D24X8 = 46, + + + PVRSRV_PIXEL_FORMAT_ABGR16 = 47, + PVRSRV_PIXEL_FORMAT_ABGR16F = 48, + PVRSRV_PIXEL_FORMAT_ABGR32 = 49, + PVRSRV_PIXEL_FORMAT_ABGR32F = 50, + PVRSRV_PIXEL_FORMAT_B10GR11 = 51, + PVRSRV_PIXEL_FORMAT_GR88 = 52, + PVRSRV_PIXEL_FORMAT_BGR32 = 53, + PVRSRV_PIXEL_FORMAT_GR32 = 54, + PVRSRV_PIXEL_FORMAT_E5BGR9 = 55, + + + PVRSRV_PIXEL_FORMAT_RESERVED1 = 56, + PVRSRV_PIXEL_FORMAT_RESERVED2 = 57, + PVRSRV_PIXEL_FORMAT_RESERVED3 = 58, + PVRSRV_PIXEL_FORMAT_RESERVED4 = 59, + PVRSRV_PIXEL_FORMAT_RESERVED5 = 60, + + + PVRSRV_PIXEL_FORMAT_R8G8_B8G8 = 61, + PVRSRV_PIXEL_FORMAT_G8R8_G8B8 = 62, + + + PVRSRV_PIXEL_FORMAT_NV11 = 63, + PVRSRV_PIXEL_FORMAT_NV12 = 64, + + + PVRSRV_PIXEL_FORMAT_YUY2 = 65, + PVRSRV_PIXEL_FORMAT_YUV420 = 66, + PVRSRV_PIXEL_FORMAT_YUV444 = 67, + PVRSRV_PIXEL_FORMAT_VUY444 = 68, + PVRSRV_PIXEL_FORMAT_YUYV = 69, + PVRSRV_PIXEL_FORMAT_YVYU = 70, + PVRSRV_PIXEL_FORMAT_UYVY = 71, + PVRSRV_PIXEL_FORMAT_VYUY = 72, + + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY = 73, + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV = 74, + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU = 75, + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY = 76, + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_AYUV = 77, + + + PVRSRV_PIXEL_FORMAT_A32B32G32R32 = 78, + PVRSRV_PIXEL_FORMAT_A32B32G32R32F = 79, + PVRSRV_PIXEL_FORMAT_A32B32G32R32_UINT = 80, + PVRSRV_PIXEL_FORMAT_A32B32G32R32_SINT = 81, + + + PVRSRV_PIXEL_FORMAT_B32G32R32 = 82, + PVRSRV_PIXEL_FORMAT_B32G32R32F = 83, + PVRSRV_PIXEL_FORMAT_B32G32R32_UINT = 84, + PVRSRV_PIXEL_FORMAT_B32G32R32_SINT = 85, + + + PVRSRV_PIXEL_FORMAT_G32R32 = 86, + PVRSRV_PIXEL_FORMAT_G32R32F = 87, + PVRSRV_PIXEL_FORMAT_G32R32_UINT = 88, + PVRSRV_PIXEL_FORMAT_G32R32_SINT = 89, + + + PVRSRV_PIXEL_FORMAT_D32F = 90, + PVRSRV_PIXEL_FORMAT_R32 = 91, + PVRSRV_PIXEL_FORMAT_R32F = 92, + PVRSRV_PIXEL_FORMAT_R32_UINT = 93, + PVRSRV_PIXEL_FORMAT_R32_SINT = 94, + + + 
PVRSRV_PIXEL_FORMAT_A16B16G16R16 = 95, + PVRSRV_PIXEL_FORMAT_A16B16G16R16F = 96, + PVRSRV_PIXEL_FORMAT_A16B16G16R16_SINT = 97, + PVRSRV_PIXEL_FORMAT_A16B16G16R16_SNORM = 98, + PVRSRV_PIXEL_FORMAT_A16B16G16R16_UINT = 99, + PVRSRV_PIXEL_FORMAT_A16B16G16R16_UNORM = 100, + + + PVRSRV_PIXEL_FORMAT_G16R16 = 101, + PVRSRV_PIXEL_FORMAT_G16R16F = 102, + PVRSRV_PIXEL_FORMAT_G16R16_UINT = 103, + PVRSRV_PIXEL_FORMAT_G16R16_UNORM = 104, + PVRSRV_PIXEL_FORMAT_G16R16_SINT = 105, + PVRSRV_PIXEL_FORMAT_G16R16_SNORM = 106, + + + PVRSRV_PIXEL_FORMAT_R16 = 107, + PVRSRV_PIXEL_FORMAT_R16F = 108, + PVRSRV_PIXEL_FORMAT_R16_UINT = 109, + PVRSRV_PIXEL_FORMAT_R16_UNORM = 110, + PVRSRV_PIXEL_FORMAT_R16_SINT = 111, + PVRSRV_PIXEL_FORMAT_R16_SNORM = 112, + + + PVRSRV_PIXEL_FORMAT_X8R8G8B8 = 113, + PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM = 114, + PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM_SRGB = 115, + + PVRSRV_PIXEL_FORMAT_A8R8G8B8 = 116, + PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM = 117, + PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM_SRGB = 118, + + PVRSRV_PIXEL_FORMAT_A8B8G8R8 = 119, + PVRSRV_PIXEL_FORMAT_A8B8G8R8_UINT = 120, + PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM = 121, + PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM_SRGB = 122, + PVRSRV_PIXEL_FORMAT_A8B8G8R8_SINT = 123, + PVRSRV_PIXEL_FORMAT_A8B8G8R8_SNORM = 124, + + + PVRSRV_PIXEL_FORMAT_G8R8 = 125, + PVRSRV_PIXEL_FORMAT_G8R8_UINT = 126, + PVRSRV_PIXEL_FORMAT_G8R8_UNORM = 127, + PVRSRV_PIXEL_FORMAT_G8R8_SINT = 128, + PVRSRV_PIXEL_FORMAT_G8R8_SNORM = 129, + + + PVRSRV_PIXEL_FORMAT_A8 = 130, + PVRSRV_PIXEL_FORMAT_R8 = 131, + PVRSRV_PIXEL_FORMAT_R8_UINT = 132, + PVRSRV_PIXEL_FORMAT_R8_UNORM = 133, + PVRSRV_PIXEL_FORMAT_R8_SINT = 134, + PVRSRV_PIXEL_FORMAT_R8_SNORM = 135, + + + PVRSRV_PIXEL_FORMAT_A2B10G10R10 = 136, + PVRSRV_PIXEL_FORMAT_A2B10G10R10_UNORM = 137, + PVRSRV_PIXEL_FORMAT_A2B10G10R10_UINT = 138, + + + PVRSRV_PIXEL_FORMAT_B10G11R11 = 139, + PVRSRV_PIXEL_FORMAT_B10G11R11F = 140, + + + PVRSRV_PIXEL_FORMAT_X24G8R32 = 141, + PVRSRV_PIXEL_FORMAT_G8R24 = 142, + 
PVRSRV_PIXEL_FORMAT_X8R24 = 143, + PVRSRV_PIXEL_FORMAT_E5B9G9R9 = 144, + PVRSRV_PIXEL_FORMAT_R1 = 145, + + PVRSRV_PIXEL_FORMAT_RESERVED6 = 146, + PVRSRV_PIXEL_FORMAT_RESERVED7 = 147, + PVRSRV_PIXEL_FORMAT_RESERVED8 = 148, + PVRSRV_PIXEL_FORMAT_RESERVED9 = 149, + PVRSRV_PIXEL_FORMAT_RESERVED10 = 150, + PVRSRV_PIXEL_FORMAT_RESERVED11 = 151, + PVRSRV_PIXEL_FORMAT_RESERVED12 = 152, + PVRSRV_PIXEL_FORMAT_RESERVED13 = 153, + PVRSRV_PIXEL_FORMAT_RESERVED14 = 154, + PVRSRV_PIXEL_FORMAT_RESERVED15 = 155, + PVRSRV_PIXEL_FORMAT_RESERVED16 = 156, + PVRSRV_PIXEL_FORMAT_RESERVED17 = 157, + PVRSRV_PIXEL_FORMAT_RESERVED18 = 158, + PVRSRV_PIXEL_FORMAT_RESERVED19 = 159, + PVRSRV_PIXEL_FORMAT_RESERVED20 = 160, + + + PVRSRV_PIXEL_FORMAT_UBYTE4 = 161, + PVRSRV_PIXEL_FORMAT_SHORT4 = 162, + PVRSRV_PIXEL_FORMAT_SHORT4N = 163, + PVRSRV_PIXEL_FORMAT_USHORT4N = 164, + PVRSRV_PIXEL_FORMAT_SHORT2N = 165, + PVRSRV_PIXEL_FORMAT_SHORT2 = 166, + PVRSRV_PIXEL_FORMAT_USHORT2N = 167, + PVRSRV_PIXEL_FORMAT_UDEC3 = 168, + PVRSRV_PIXEL_FORMAT_DEC3N = 169, + PVRSRV_PIXEL_FORMAT_F16_2 = 170, + PVRSRV_PIXEL_FORMAT_F16_4 = 171, + + + PVRSRV_PIXEL_FORMAT_L_F16 = 172, + PVRSRV_PIXEL_FORMAT_L_F16_REP = 173, + PVRSRV_PIXEL_FORMAT_L_F16_A_F16 = 174, + PVRSRV_PIXEL_FORMAT_A_F16 = 175, + PVRSRV_PIXEL_FORMAT_B16G16R16F = 176, + + PVRSRV_PIXEL_FORMAT_L_F32 = 177, + PVRSRV_PIXEL_FORMAT_A_F32 = 178, + PVRSRV_PIXEL_FORMAT_L_F32_A_F32 = 179, + + + PVRSRV_PIXEL_FORMAT_PVRTC2 = 180, + PVRSRV_PIXEL_FORMAT_PVRTC4 = 181, + PVRSRV_PIXEL_FORMAT_PVRTCII2 = 182, + PVRSRV_PIXEL_FORMAT_PVRTCII4 = 183, + PVRSRV_PIXEL_FORMAT_PVRTCIII = 184, + PVRSRV_PIXEL_FORMAT_PVRO8 = 185, + PVRSRV_PIXEL_FORMAT_PVRO88 = 186, + PVRSRV_PIXEL_FORMAT_PT1 = 187, + PVRSRV_PIXEL_FORMAT_PT2 = 188, + PVRSRV_PIXEL_FORMAT_PT4 = 189, + PVRSRV_PIXEL_FORMAT_PT8 = 190, + PVRSRV_PIXEL_FORMAT_PTW = 191, + PVRSRV_PIXEL_FORMAT_PTB = 192, + PVRSRV_PIXEL_FORMAT_MONO8 = 193, + PVRSRV_PIXEL_FORMAT_MONO16 = 194, + + + PVRSRV_PIXEL_FORMAT_C0_YUYV = 195, + 
PVRSRV_PIXEL_FORMAT_C0_UYVY = 196, + PVRSRV_PIXEL_FORMAT_C0_YVYU = 197, + PVRSRV_PIXEL_FORMAT_C0_VYUY = 198, + PVRSRV_PIXEL_FORMAT_C1_YUYV = 199, + PVRSRV_PIXEL_FORMAT_C1_UYVY = 200, + PVRSRV_PIXEL_FORMAT_C1_YVYU = 201, + PVRSRV_PIXEL_FORMAT_C1_VYUY = 202, + + + PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_UV = 203, + PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_VU = 204, + PVRSRV_PIXEL_FORMAT_C0_YUV420_3P = 205, + PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_UV = 206, + PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_VU = 207, + PVRSRV_PIXEL_FORMAT_C1_YUV420_3P = 208, + + PVRSRV_PIXEL_FORMAT_A2B10G10R10F = 209, + PVRSRV_PIXEL_FORMAT_B8G8R8_SINT = 210, + PVRSRV_PIXEL_FORMAT_PVRF32SIGNMASK = 211, + + PVRSRV_PIXEL_FORMAT_ABGR4444 = 212, + PVRSRV_PIXEL_FORMAT_ABGR1555 = 213, + PVRSRV_PIXEL_FORMAT_BGR565 = 214, + + PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff + +} PVRSRV_PIXEL_FORMAT; + +typedef enum _PVRSRV_ALPHA_FORMAT_ { + PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000, + PVRSRV_ALPHA_FORMAT_PRE = 0x00000001, + PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002, + PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F, +} PVRSRV_ALPHA_FORMAT; + +typedef enum _PVRSRV_COLOURSPACE_FORMAT_ { + PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000, + PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000, + PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000, + PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000, +} PVRSRV_COLOURSPACE_FORMAT; + + +typedef enum _PVRSRV_ROTATION_ { + PVRSRV_ROTATE_0 = 0, + PVRSRV_ROTATE_90 = 1, + PVRSRV_ROTATE_180 = 2, + PVRSRV_ROTATE_270 = 3, + PVRSRV_FLIP_Y + +} PVRSRV_ROTATION; + +#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0) +#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1) +#define PVRSRV_CREATE_SWAPCHAIN_OEMOVERLAY (1<<2) + +typedef struct _PVRSRV_SYNC_DATA_ +{ + + IMG_UINT32 ui32WriteOpsPending; + volatile IMG_UINT32 ui32WriteOpsComplete; + + + IMG_UINT32 ui32ReadOpsPending; + volatile IMG_UINT32 ui32ReadOpsComplete; + + + IMG_UINT32 ui32LastOpDumpVal; + IMG_UINT32 ui32LastReadOpDumpVal; + +} PVRSRV_SYNC_DATA; + +typedef struct 
_PVRSRV_CLIENT_SYNC_INFO_ +{ + + PVRSRV_SYNC_DATA *psSyncData; + + + + + + IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr; + + + IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr; + + + IMG_HANDLE hMappingInfo; + + + IMG_HANDLE hKernelSyncInfo; + +} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO; + +typedef struct PVRSRV_RESOURCE_TAG +{ + volatile IMG_UINT32 ui32Lock; + IMG_UINT32 ui32ID; +}PVRSRV_RESOURCE; +typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE; + + +typedef IMG_VOID (*PFN_CMD_COMPLETE) (IMG_HANDLE); +typedef IMG_VOID (**PPFN_CMD_COMPLETE) (IMG_HANDLE); + +typedef IMG_BOOL (*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*); +typedef IMG_BOOL (**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*); + + +typedef struct _IMG_RECT_ +{ + IMG_INT32 x0; + IMG_INT32 y0; + IMG_INT32 x1; + IMG_INT32 y1; +}IMG_RECT; + +typedef struct _IMG_RECT_16_ +{ + IMG_INT16 x0; + IMG_INT16 y0; + IMG_INT16 x1; + IMG_INT16 y1; +}IMG_RECT_16; + + +typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(IMG_HANDLE, + IMG_HANDLE, + IMG_SYS_PHYADDR**, + IMG_SIZE_T*, + IMG_VOID**, + IMG_HANDLE*, + IMG_BOOL*, + IMG_UINT32*); + + +typedef struct DISPLAY_DIMS_TAG +{ + IMG_UINT32 ui32ByteStride; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32Height; +} DISPLAY_DIMS; + + +typedef struct DISPLAY_FORMAT_TAG +{ + + PVRSRV_PIXEL_FORMAT pixelformat; +} DISPLAY_FORMAT; + +typedef struct DISPLAY_SURF_ATTRIBUTES_TAG +{ + + PVRSRV_PIXEL_FORMAT pixelformat; + + DISPLAY_DIMS sDims; +} DISPLAY_SURF_ATTRIBUTES; + + +typedef struct DISPLAY_MODE_INFO_TAG +{ + + PVRSRV_PIXEL_FORMAT pixelformat; + + DISPLAY_DIMS sDims; + + IMG_UINT32 ui32RefreshHZ; + + IMG_UINT32 ui32OEMFlags; +} DISPLAY_MODE_INFO; + + + +#define MAX_DISPLAY_NAME_SIZE (50) + +typedef struct DISPLAY_INFO_TAG +{ + + IMG_UINT32 ui32MaxSwapChains; + + IMG_UINT32 ui32MaxSwapChainBuffers; + + IMG_UINT32 ui32MinSwapInterval; + + IMG_UINT32 ui32MaxSwapInterval; + + IMG_UINT32 ui32PhysicalWidthmm; + IMG_UINT32 ui32PhysicalHeightmm; + + IMG_CHAR 
szDisplayName[MAX_DISPLAY_NAME_SIZE]; +#if defined(SUPPORT_HW_CURSOR) + + IMG_UINT16 ui32CursorWidth; + IMG_UINT16 ui32CursorHeight; +#endif +} DISPLAY_INFO; + +typedef struct ACCESS_INFO_TAG +{ + IMG_UINT32 ui32Size; + IMG_UINT32 ui32FBPhysBaseAddress; + IMG_UINT32 ui32FBMemAvailable; + IMG_UINT32 ui32SysPhysBaseAddress; + IMG_UINT32 ui32SysSize; + IMG_UINT32 ui32DevIRQ; +}ACCESS_INFO; + + +typedef struct PVRSRV_CURSOR_SHAPE_TAG +{ + IMG_UINT16 ui16Width; + IMG_UINT16 ui16Height; + IMG_INT16 i16XHot; + IMG_INT16 i16YHot; + + + IMG_VOID* pvMask; + IMG_INT16 i16MaskByteStride; + + + IMG_VOID* pvColour; + IMG_INT16 i16ColourByteStride; + PVRSRV_PIXEL_FORMAT eColourPixelFormat; +} PVRSRV_CURSOR_SHAPE; + +#define PVRSRV_SET_CURSOR_VISIBILITY (1<<0) +#define PVRSRV_SET_CURSOR_POSITION (1<<1) +#define PVRSRV_SET_CURSOR_SHAPE (1<<2) +#define PVRSRV_SET_CURSOR_ROTATION (1<<3) + +typedef struct PVRSRV_CURSOR_INFO_TAG +{ + + IMG_UINT32 ui32Flags; + + + IMG_BOOL bVisible; + + + IMG_INT16 i16XPos; + IMG_INT16 i16YPos; + + + PVRSRV_CURSOR_SHAPE sCursorShape; + + + IMG_UINT32 ui32Rotation; + +} PVRSRV_CURSOR_INFO; + + +typedef struct _PVRSRV_REGISTRY_INFO_ +{ + IMG_UINT32 ui32DevCookie; + IMG_PCHAR pszKey; + IMG_PCHAR pszValue; + IMG_PCHAR pszBuf; + IMG_UINT32 ui32BufSize; +} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO; + + +PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo); +PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo); + + +#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << 0) +#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << 0) + +#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << 1) +#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << 1) + +#define MAX_BUFFER_DEVICE_NAME_SIZE (50) + +typedef struct BUFFER_INFO_TAG +{ + IMG_UINT32 ui32BufferCount; + IMG_UINT32 ui32BufferDeviceID; + PVRSRV_PIXEL_FORMAT pixelformat; + IMG_UINT32 ui32ByteStride; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32Height; + IMG_UINT32 ui32Flags; 
+ IMG_CHAR szDeviceName[MAX_BUFFER_DEVICE_NAME_SIZE]; +} BUFFER_INFO; + +typedef enum _OVERLAY_DEINTERLACE_MODE_ +{ + WEAVE=0x0, + BOB_ODD, + BOB_EVEN, + BOB_EVEN_NONINTERLEAVED +} OVERLAY_DEINTERLACE_MODE; + +#endif diff --git a/sys/pvr2d/pvr_includes/wsegl.h b/sys/pvr2d/pvr_includes/wsegl.h new file mode 100644 index 0000000..e5191ec --- /dev/null +++ b/sys/pvr2d/pvr_includes/wsegl.h @@ -0,0 +1,285 @@ +/********************************************************************** +* +* Copyright(c) Imagination Technologies Ltd. +* +* The contents of this file are subject to the MIT license as set out below. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +* OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +* +* This License is also included in this distribution in the file called +* "COPYING". 
+* +******************************************************************************/ + + + +#if !defined(__WSEGL_H__) +#define __WSEGL_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +// WSEGL Platform-specific definitions +*/ +#if defined(__linux__) +#define WSEGL_EXPORT __attribute__((visibility("default"))) +#define WSEGL_IMPORT +#else +#define WSEGL_EXPORT +#define WSEGL_IMPORT +#endif + +/* +// WSEGL API Version Number +*/ + +#define WSEGL_VERSION 2 +#define WSEGL_DEFAULT_DISPLAY 0 +#define WSEGL_DEFAULT_NATIVE_ENGINE 0 + +#define WSEGL_FALSE 0 +#define WSEGL_TRUE 1 +#define WSEGL_NULL 0 + +#define WSEGL_UNREFERENCED_PARAMETER(param) (param) = (param) + +/* +// WSEGL handles +*/ +typedef void *WSEGLDisplayHandle; +typedef void *WSEGLDrawableHandle; + +/* +// Display capability type +*/ +typedef enum WSEGLCapsType_TAG +{ + WSEGL_NO_CAPS = 0, + WSEGL_CAP_MIN_SWAP_INTERVAL = 1, /* System default value = 1 */ + WSEGL_CAP_MAX_SWAP_INTERVAL = 2, /* System default value = 1 */ + WSEGL_CAP_WINDOWS_USE_HW_SYNC = 3, /* System default value = 0 (FALSE) */ + WSEGL_CAP_PIXMAPS_USE_HW_SYNC = 4, /* System default value = 0 (FALSE) */ + +} WSEGLCapsType; + +/* +// Display capability +*/ +typedef struct WSEGLCaps_TAG +{ + WSEGLCapsType eCapsType; + unsigned long ui32CapsValue; + +} WSEGLCaps; + +/* +// Drawable type +*/ +#define WSEGL_NO_DRAWABLE 0x0 +#define WSEGL_DRAWABLE_WINDOW 0x1 +#define WSEGL_DRAWABLE_PIXMAP 0x2 + + +/* +// Pixel format of display/drawable +*/ +typedef enum WSEGLPixelFormat_TAG +{ + /* These must not be re-ordered */ + WSEGL_PIXELFORMAT_RGB565 = 0, + WSEGL_PIXELFORMAT_ARGB4444 = 1, + WSEGL_PIXELFORMAT_ARGB8888 = 2, + WSEGL_PIXELFORMAT_ARGB1555 = 3, + WSEGL_PIXELFORMAT_ABGR8888 = 4, + WSEGL_PIXELFORMAT_XBGR8888 = 5, + + /* These are compatibility names only; new WSEGL + * modules should not use them. 
+ */ + WSEGL_PIXELFORMAT_565 = WSEGL_PIXELFORMAT_RGB565, + WSEGL_PIXELFORMAT_4444 = WSEGL_PIXELFORMAT_ARGB4444, + WSEGL_PIXELFORMAT_8888 = WSEGL_PIXELFORMAT_ARGB8888, + WSEGL_PIXELFORMAT_1555 = WSEGL_PIXELFORMAT_ARGB1555, + +} WSEGLPixelFormat; + +/* +// Transparent of display/drawable +*/ +typedef enum WSEGLTransparentType_TAG +{ + WSEGL_OPAQUE = 0, + WSEGL_COLOR_KEY = 1, + +} WSEGLTransparentType; + +/* +// Display/drawable configuration +*/ +typedef struct WSEGLConfig_TAG +{ + /* + // Type of drawables this configuration applies to - + // OR'd values of drawable types. + */ + unsigned long ui32DrawableType; + + /* Pixel format */ + WSEGLPixelFormat ePixelFormat; + + /* Native Renderable - set to WSEGL_TRUE if native renderable */ + unsigned long ulNativeRenderable; + + /* FrameBuffer Level Parameter */ + unsigned long ulFrameBufferLevel; + + /* Native Visual ID */ + unsigned long ulNativeVisualID; + + /* Native Visual */ + void *hNativeVisual; + + /* Transparent Type */ + WSEGLTransparentType eTransparentType; + + /* Transparent Color - only used if transparent type is COLOR_KEY */ + unsigned long ulTransparentColor; /* packed as 0x00RRGGBB */ + + +} WSEGLConfig; + +/* +// WSEGL errors +*/ +typedef enum WSEGLError_TAG +{ + WSEGL_SUCCESS = 0, + WSEGL_CANNOT_INITIALISE = 1, + WSEGL_BAD_NATIVE_DISPLAY = 2, + WSEGL_BAD_NATIVE_WINDOW = 3, + WSEGL_BAD_NATIVE_PIXMAP = 4, + WSEGL_BAD_NATIVE_ENGINE = 5, + WSEGL_BAD_DRAWABLE = 6, + WSEGL_BAD_MATCH = 7, + WSEGL_OUT_OF_MEMORY = 8, + + /* These are compatibility names only; new WSEGL + * modules should not use them. 
+ */ + WSEGL_BAD_CONFIG = WSEGL_BAD_MATCH, + +} WSEGLError; + +/* +// Drawable orientation (in degrees anti-clockwise) +*/ +typedef enum WSEGLRotationAngle_TAG +{ + WSEGL_ROTATE_0 = 0, + WSEGL_ROTATE_90 = 1, + WSEGL_ROTATE_180 = 2, + WSEGL_ROTATE_270 = 3 + +} WSEGLRotationAngle; + +/* +// Drawable information required by OpenGL-ES driver +*/ +typedef struct WSEGLDrawableParams_TAG +{ + /* Width in pixels of the drawable */ + unsigned long ui32Width; + + /* Height in pixels of the drawable */ + unsigned long ui32Height; + + /* Stride in pixels of the drawable */ + unsigned long ui32Stride; + + /* Pixel format of the drawable */ + WSEGLPixelFormat ePixelFormat; + + /* User space cpu virtual address of the drawable */ + void *pvLinearAddress; + + /* HW address of the drawable */ + unsigned long ui32HWAddress; + + /* Private data for the drawable */ + void *hPrivateData; + + +} WSEGLDrawableParams; + + +/* +// Table of function pointers that is returned by WSEGL_GetFunctionTablePointer() +// +// The first entry in the table is the version number of the wsegl.h header file that +// the module has been written against, and should therefore be set to WSEGL_VERSION +*/ +typedef struct WSEGL_FunctionTable_TAG +{ + unsigned long ui32WSEGLVersion; + + WSEGLError (*pfnWSEGL_IsDisplayValid)(NativeDisplayType); + + WSEGLError (*pfnWSEGL_InitialiseDisplay)(NativeDisplayType, WSEGLDisplayHandle *, const WSEGLCaps **, WSEGLConfig **); + + WSEGLError (*pfnWSEGL_CloseDisplay)(WSEGLDisplayHandle); + + WSEGLError (*pfnWSEGL_CreateWindowDrawable)(WSEGLDisplayHandle, WSEGLConfig *, WSEGLDrawableHandle *, NativeWindowType, WSEGLRotationAngle *); + + WSEGLError (*pfnWSEGL_CreatePixmapDrawable)(WSEGLDisplayHandle, WSEGLConfig *, WSEGLDrawableHandle *, NativePixmapType, WSEGLRotationAngle *); + + WSEGLError (*pfnWSEGL_DeleteDrawable)(WSEGLDrawableHandle); + + WSEGLError (*pfnWSEGL_SwapDrawable)(WSEGLDrawableHandle, unsigned long); + + WSEGLError 
(*pfnWSEGL_SwapControlInterval)(WSEGLDrawableHandle, unsigned long); + + WSEGLError (*pfnWSEGL_WaitNative)(WSEGLDrawableHandle, unsigned long); + + WSEGLError (*pfnWSEGL_CopyFromDrawable)(WSEGLDrawableHandle, NativePixmapType); + + WSEGLError (*pfnWSEGL_CopyFromPBuffer)(void *, unsigned long, unsigned long, unsigned long, WSEGLPixelFormat, NativePixmapType); + + WSEGLError (*pfnWSEGL_GetDrawableParameters)(WSEGLDrawableHandle, WSEGLDrawableParams *, WSEGLDrawableParams *); + + WSEGLError (*pfnWSEGL_ConnectDrawable)(WSEGLDrawableHandle); + + WSEGLError (*pfnWSEGL_DisconnectDrawable)(WSEGLDrawableHandle); + + +} WSEGL_FunctionTable; + + +WSEGL_IMPORT const WSEGL_FunctionTable *WSEGL_GetFunctionTablePointer(void); + +#ifdef __cplusplus +} +#endif + +#endif /* __WSEGL_H__ */ + +/****************************************************************************** + End of file (wsegl.h) +******************************************************************************/ diff --git a/sys/shm/shmpipe.c b/sys/shm/shmpipe.c index 6465e38..38711f9 100644 --- a/sys/shm/shmpipe.c +++ b/sys/shm/shmpipe.c @@ -262,7 +262,7 @@ static ShmArea * sp_open_shm (char *path, int id, mode_t perms, size_t size) { ShmArea *area = spalloc_new (ShmArea); - char tmppath[PATH_MAX]; + char tmppath[32]; int flags; int prot; int i = 0; @@ -285,7 +285,7 @@ sp_open_shm (char *path, int id, mode_t perms, size_t size) area->shm_fd = shm_open (path, flags, perms); } else { do { - snprintf (tmppath, PATH_MAX, "/shmpipe.5%d.%5d", getpid (), i++); + snprintf (tmppath, sizeof (tmppath), "/shmpipe.%5d.%5d", getpid (), i++); area->shm_fd = shm_open (tmppath, flags, perms); } while (area->shm_fd < 0 && errno == EEXIST); } diff --git a/tests/check/Makefile.am b/tests/check/Makefile.am index 433b94f..0d2aff6 100644 --- a/tests/check/Makefile.am +++ b/tests/check/Makefile.am @@ -14,7 +14,7 @@ TESTS_ENVIRONMENT = \ GST_PLUGIN_SYSTEM_PATH= \ 
GST_PLUGIN_PATH=$(top_builddir)/gst:$(top_builddir)/sys:$(top_builddir)/ext:$(GST_PLUGINS_FFMPEG_DIR):$(GST_PLUGINS_UGLY_DIR):$(GST_PLUGINS_GOOD_DIR):$(GST_PLUGINS_BASE_DIR):$(GST_PLUGINS_DIR) \ GST_PLUGIN_LOADING_WHITELIST="gstreamer@$(GST_PLUGINS_DIR):gst-plugins-base@$(GSTPB_PLUGINS_DIR):gst-plugins-good:gst-plugins-ugly:gst-ffmpeg:gst-plugins-bad@$(top_builddir)" \ - GST_STATE_IGNORE_ELEMENTS="apexsink camerabin camerabin2 cdaudio dc1394src dccpclientsrc dccpclientsink dccpserversrc dccpserversink dvbsrc dvbbasebin dfbvideosink festival gsettingsvideosrc gsettingsvideosink gsettingsaudiosrc gsettingsaudiosink nassink rsndvdbin sdlaudiosink sdlvideosink vcdsrc rfbsrc vdpauyuvvideo vdpauvideoyuv vdpaumpegdec vdpaumpeg4dec vdpauh264dec vdpauvideopostprocess vdpausink neonhttpsrc" + STATE_IGNORE_ELEMENTS="alsaspdifsink apexsink camerabin camerabin2 cdaudio dc1394src dccpclientsrc dccpclientsink dccpserversrc dccpserversink dvbsrc dvbbasebin dfbvideosink festival gsettingsvideosrc gsettingsvideosink gsettingsaudiosrc gsettingsaudiosink nassink rsndvdbin sdlaudiosink sdlvideosink vcdsrc rfbsrc vdpauyuvvideo vdpauvideoyuv vdpaumpegdec neonhttpsrc" plugindir = $(libdir)/gstreamer-@GST_MAJORMINOR@ @@ -135,7 +135,6 @@ VALGRIND_TESTS_DISABLE = \ if BUILD_EXPERIMENTAL EXPERIMENTAL_CHECKS=elements/camerabin2 \ - elements/imagecapturebin \ elements/viewfinderbin endif @@ -158,7 +157,9 @@ check_PROGRAMS = \ elements/autovideoconvert \ elements/asfmux \ elements/camerabin \ + elements/camerabin2 \ elements/dataurisrc \ + elements/imagecapturebin \ elements/legacyresample \ $(check_jifmux) \ elements/jpegparse \ @@ -169,11 +170,14 @@ check_PROGRAMS = \ pipelines/mxf \ $(check_mimic) \ elements/rtpmux \ + libs/h264parser \ $(check_schro) \ $(check_vp8) \ $(check_zbar) \ $(check_orc) \ - $(EXPERIMENTAL_CHECKS) + $(EXPERIMENTAL_CHECKS) \ + pipelines/tagschecking \ + elements/viewfinderbin noinst_HEADERS = elements/mxfdemux.h @@ -184,6 +188,15 @@ AM_CFLAGS = $(GST_CHECK_CFLAGS) 
$(GST_OPTION_CFLAGS) \ -UG_DISABLE_ASSERT -UG_DISABLE_CAST_CHECKS LDADD = $(GST_CHECK_LIBS) +libs_h264parser_CFLAGS = \ + $(GST_PLUGINS_BAD_CFLAGS) $(GST_PLUGINS_BASE_CFLAGS) \ + $(GST_BASE_CFLAGS) $(GST_CFLAGS) $(AM_CFLAGS) + +libs_h264parser_LDADD = \ + $(top_builddir)/gst-libs/gst/codecparsers/libgstcodecparsers-@GST_MAJORMINOR@.la \ + $(GST_PLUGINS_BAD_LIBS) -lgstcodecparsers-@GST_MAJORMINOR@ \ + $(GST_BASE_LIBS) $(GST_LIBS) $(LDADD) + elements_camerabin_CFLAGS = \ $(GST_PLUGINS_BAD_CFLAGS) $(GST_PLUGINS_BASE_CFLAGS) \ $(GST_BASE_CFLAGS) $(GST_CFLAGS) $(AM_CFLAGS) -DGST_USE_UNSTABLE_API @@ -205,13 +218,6 @@ elements_camerabin2_LDADD = \ $(GST_PLUGINS_BASE_LIBS) $(GST_BASE_LIBS) $(GST_LIBS) $(LDADD) elements_camerabin2_SOURCES = elements/camerabin2.c -elements_imagecapturebin_CFLAGS = \ - $(GST_PLUGINS_BAD_CFLAGS) $(GST_PLUGINS_BASE_CFLAGS) \ - $(GST_BASE_CFLAGS) $(GST_CFLAGS) $(AM_CFLAGS) -DGST_USE_UNSTABLE_API -elements_imagecapturebin_LDADD = \ - $(GST_PLUGINS_BASE_LIBS) -lgstapp-@GST_MAJORMINOR@ \ - $(GST_BASE_LIBS) $(GST_LIBS) $(LDADD) -elements_imagecapturebin_SOURCES = elements/imagecapturebin.c endif elements_jifmux_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(EXIF_CFLAGS) $(AM_CFLAGS) diff --git a/tests/check/elements/camerabin2.c b/tests/check/elements/camerabin2.c index f449985..d43b1c0 100644 --- a/tests/check/elements/camerabin2.c +++ b/tests/check/elements/camerabin2.c @@ -58,7 +58,7 @@ typedef struct _GstTestCameraSrc GstTestCameraSrc; typedef struct _GstTestCameraSrcClass GstTestCameraSrcClass; struct _GstTestCameraSrc { - GstBaseCameraSrc element; + GstBaseCameraBinSrc element; GstPad *vfpad; GstPad *vidpad; @@ -69,16 +69,16 @@ struct _GstTestCameraSrc struct _GstTestCameraSrcClass { - GstBaseCameraSrcClass parent_class; + GstBaseCameraBinSrcClass parent_class; }; GType gst_test_camera_src_get_type (void); GST_BOILERPLATE (GstTestCameraSrc, - gst_test_camera_src, GstBaseCameraSrc, GST_TYPE_BASE_CAMERA_SRC); + gst_test_camera_src, GstBaseCameraBinSrc, 
GST_TYPE_BASE_CAMERA_SRC); static gboolean -gst_test_camera_src_set_mode (GstBaseCameraSrc * src, GstCameraBinMode mode) +gst_test_camera_src_set_mode (GstBaseCameraBinSrc * src, GstCameraBinMode mode) { GstTestCameraSrc *self = GST_TEST_CAMERA_SRC (src); @@ -122,7 +122,7 @@ gst_test_camera_src_class_init (GstTestCameraSrcClass * klass) { GObjectClass *gobject_class; GstElementClass *gstelement_class; - GstBaseCameraSrcClass *gstbasecamera_class; + GstBaseCameraBinSrcClass *gstbasecamera_class; gobject_class = G_OBJECT_CLASS (klass); gstelement_class = GST_ELEMENT_CLASS (klass); diff --git a/tests/check/elements/imagecapturebin.c b/tests/check/elements/imagecapturebin.c deleted file mode 100644 index 5c5f736..0000000 --- a/tests/check/elements/imagecapturebin.c +++ /dev/null @@ -1,410 +0,0 @@ -/* GStreamer unit test for the imagecapturebin element - * Copyright (C) 2010 Thiago Santos <thiago.sousa.santos@collabora.co.uk> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. 
- */ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include <stdio.h> -#include <gst/check/gstcheck.h> -#include <gst/app/gstappsink.h> -#include <gst/app/gstappsrc.h> - -#define N_BUFFERS 3 - -typedef struct -{ - GstElement *pipe; - GstElement *src; - GstElement *icbin; -} GstImageCaptureBinTestContext; - -static void -gstimagecapturebin_init_test_context (GstImageCaptureBinTestContext * ctx, - const gchar * src, gint num_buffers) -{ - fail_unless (ctx != NULL); - - ctx->pipe = gst_pipeline_new ("pipeline"); - fail_unless (ctx->pipe != NULL); - ctx->src = gst_element_factory_make (src, "src"); - fail_unless (ctx->src != NULL, "Failed to create src element"); - ctx->icbin = gst_element_factory_make ("imagecapturebin", "icbin"); - fail_unless (ctx->icbin != NULL, "Failed to create imagecapturebin element"); - - if (num_buffers > 0) - g_object_set (ctx->src, "num-buffers", num_buffers, NULL); - - fail_unless (gst_bin_add (GST_BIN (ctx->pipe), ctx->src)); - fail_unless (gst_bin_add (GST_BIN (ctx->pipe), ctx->icbin)); - fail_unless (gst_element_link_many (ctx->src, ctx->icbin, NULL)); -} - -static void -gstimagecapturebin_unset_test_context (GstImageCaptureBinTestContext * ctx) -{ - gst_element_set_state (ctx->pipe, GST_STATE_NULL); - gst_object_unref (ctx->pipe); - memset (ctx, 0, sizeof (GstImageCaptureBinTestContext)); -} - -static gchar * -make_test_file_name (void) -{ - return g_strdup_printf ("%s" G_DIR_SEPARATOR_S - "imagecapturebintest_%%d.cap", g_get_tmp_dir ()); -} - -static gboolean -get_file_info (const gchar * filename, gint * width, gint * height) -{ - GstElement *playbin = gst_element_factory_make ("playbin2", NULL); - GstElement *fakesink = gst_element_factory_make ("fakesink", NULL); - GstState state = GST_STATE_NULL; - GstPad *pad; - GstCaps *caps; - gchar *uri = g_strdup_printf ("file://%s", filename); - - g_object_set (playbin, "video-sink", fakesink, NULL); - g_object_set (playbin, "uri", uri, NULL); - g_free (uri); - - 
gst_element_set_state (playbin, GST_STATE_PAUSED); - - gst_element_get_state (playbin, &state, NULL, GST_SECOND * 5); - - fail_unless (state == GST_STATE_PAUSED); - - g_signal_emit_by_name (playbin, "get-video-pad", 0, &pad, NULL); - caps = gst_pad_get_negotiated_caps (pad); - fail_unless (gst_structure_get_int (gst_caps_get_structure (caps, 0), "width", - width)); - fail_unless (gst_structure_get_int (gst_caps_get_structure (caps, 0), - "height", height)); - - gst_object_unref (pad); - gst_element_set_state (playbin, GST_STATE_NULL); - gst_object_unref (playbin); - return TRUE; -} - -static GstBuffer * -create_video_buffer (GstCaps * caps) -{ - GstElement *pipeline; - GstElement *cf; - GstElement *sink; - GstBuffer *buffer; - - pipeline = - gst_parse_launch - ("videotestsrc num-buffers=1 ! capsfilter name=cf ! appsink name=sink", - NULL); - g_assert (pipeline != NULL); - - cf = gst_bin_get_by_name (GST_BIN (pipeline), "cf"); - sink = gst_bin_get_by_name (GST_BIN (pipeline), "sink"); - - g_object_set (G_OBJECT (cf), "caps", caps, NULL); - - gst_element_set_state (pipeline, GST_STATE_PLAYING); - - buffer = gst_app_sink_pull_buffer (GST_APP_SINK (sink)); - - gst_element_set_state (pipeline, GST_STATE_NULL); - gst_object_unref (pipeline); - gst_object_unref (sink); - gst_object_unref (cf); - return buffer; -} - - -GST_START_TEST (test_simple_capture) -{ - GstImageCaptureBinTestContext ctx; - GstBus *bus; - GstMessage *msg; - gchar *test_file_name; - gint i; - - gstimagecapturebin_init_test_context (&ctx, "videotestsrc", N_BUFFERS); - bus = gst_element_get_bus (ctx.pipe); - - test_file_name = make_test_file_name (); - g_object_set (ctx.icbin, "location", test_file_name, NULL); - - fail_if (gst_element_set_state (ctx.pipe, GST_STATE_PLAYING) == - GST_STATE_CHANGE_FAILURE); - - msg = gst_bus_timed_pop_filtered (bus, GST_SECOND * 10, - GST_MESSAGE_EOS | GST_MESSAGE_ERROR); - fail_unless (msg != NULL); - fail_unless (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_EOS); - 
gst_message_unref (msg); - - /* check there are N_BUFFERS files */ - for (i = 0; i < N_BUFFERS; i++) { - gchar *filename; - FILE *f; - - filename = g_strdup_printf (test_file_name, i); - - fail_unless (g_file_test (filename, G_FILE_TEST_EXISTS)); - fail_unless (g_file_test (filename, G_FILE_TEST_IS_REGULAR)); - fail_if (g_file_test (filename, G_FILE_TEST_IS_SYMLINK)); - - /* check the file isn't empty */ - f = fopen (filename, "r"); - fseek (f, 0, SEEK_END); - fail_unless (ftell (f) > 0); - fclose (f); - - g_free (filename); - } - - gstimagecapturebin_unset_test_context (&ctx); - gst_object_unref (bus); - g_free (test_file_name); -} - -GST_END_TEST; - - -GST_START_TEST (test_multiple_captures_different_caps) -{ - GstImageCaptureBinTestContext ctx; - GstBus *bus; - GstMessage *msg; - gchar *test_file_name; - gint i; - gint widths[] = { 100, 300, 200 }; - gint heights[] = { 300, 200, 100 }; - GstPad *pad; - - gstimagecapturebin_init_test_context (&ctx, "appsrc", N_BUFFERS); - bus = gst_element_get_bus (ctx.pipe); - - test_file_name = make_test_file_name (); - g_object_set (ctx.icbin, "location", test_file_name, NULL); - fail_if (gst_element_set_state (ctx.pipe, GST_STATE_PLAYING) == - GST_STATE_CHANGE_FAILURE); - - /* push data directly because set_caps and buffer pushes on appsrc - * are not serialized into the flow, so we can't guarantee the buffers - * have the caps we want on them when pushed */ - pad = gst_element_get_static_pad (ctx.src, "src"); - - /* push the buffers */ - for (i = 0; i < N_BUFFERS; i++) { - GstCaps *caps; - GstBuffer *buf; - - caps = gst_caps_new_simple ("video/x-raw-yuv", "width", G_TYPE_INT, - widths[i], "height", G_TYPE_INT, heights[i], "framerate", - GST_TYPE_FRACTION, 1, 1, "format", GST_TYPE_FOURCC, - GST_MAKE_FOURCC ('I', '4', '2', '0'), NULL); - - buf = create_video_buffer (caps); - fail_if (buf == NULL); - - fail_unless (gst_pad_push (pad, buf) == GST_FLOW_OK); - gst_caps_unref (caps); - } - gst_app_src_end_of_stream (GST_APP_SRC 
(ctx.src)); - gst_object_unref (pad); - - msg = gst_bus_timed_pop_filtered (bus, GST_SECOND * 10, - GST_MESSAGE_EOS | GST_MESSAGE_ERROR); - fail_unless (msg != NULL); - fail_unless (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_EOS); - gst_message_unref (msg); - - /* check there are N_BUFFERS files */ - for (i = 0; i < N_BUFFERS; i++) { - gchar *filename; - FILE *f; - gint width = 0, height = 0; - - filename = g_strdup_printf (test_file_name, i); - - fail_unless (g_file_test (filename, G_FILE_TEST_EXISTS)); - fail_unless (g_file_test (filename, G_FILE_TEST_IS_REGULAR)); - fail_if (g_file_test (filename, G_FILE_TEST_IS_SYMLINK)); - - /* check the file isn't empty */ - f = fopen (filename, "r"); - fseek (f, 0, SEEK_END); - fail_unless (ftell (f) > 0); - fclose (f); - - /* get the file info */ - fail_unless (get_file_info (filename, &width, &height)); - fail_unless (width == widths[i]); - fail_unless (height == heights[i]); - - g_free (filename); - } - - gstimagecapturebin_unset_test_context (&ctx); - gst_object_unref (bus); - g_free (test_file_name); -} - -GST_END_TEST; - -GST_START_TEST (test_setting_encoder) -{ - GstImageCaptureBinTestContext ctx; - GstBus *bus; - GstMessage *msg; - GstElement *encoder; - gchar *test_file_name; - gint i; - - gstimagecapturebin_init_test_context (&ctx, "videotestsrc", N_BUFFERS); - bus = gst_element_get_bus (ctx.pipe); - - test_file_name = make_test_file_name (); - g_object_set (ctx.icbin, "location", test_file_name, NULL); - - encoder = gst_element_factory_make ("jpegenc", NULL); - g_object_set (ctx.icbin, "image-encoder", encoder, NULL); - - fail_if (gst_element_set_state (ctx.pipe, GST_STATE_PLAYING) == - GST_STATE_CHANGE_FAILURE); - - msg = gst_bus_timed_pop_filtered (bus, GST_SECOND * 10, - GST_MESSAGE_EOS | GST_MESSAGE_ERROR); - fail_unless (msg != NULL); - fail_unless (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_EOS); - - /* check there are N_BUFFERS files */ - for (i = 0; i < N_BUFFERS; i++) { - gchar *filename; - FILE *f; - - filename = 
g_strdup_printf (test_file_name, i); - - fail_unless (g_file_test (filename, G_FILE_TEST_EXISTS)); - fail_unless (g_file_test (filename, G_FILE_TEST_IS_REGULAR)); - fail_if (g_file_test (filename, G_FILE_TEST_IS_SYMLINK)); - - /* check the file isn't empty */ - f = fopen (filename, "r"); - fseek (f, 0, SEEK_END); - fail_unless (ftell (f) > 0); - fclose (f); - - g_free (filename); - } - - gstimagecapturebin_unset_test_context (&ctx); - gst_object_unref (bus); - g_free (test_file_name); -} - -GST_END_TEST; - -GST_START_TEST (test_setting_muxer) -{ - GstImageCaptureBinTestContext ctx; - GstBus *bus; - GstMessage *msg; - GstElement *encoder; - gchar *test_file_name; - gint i; - - gstimagecapturebin_init_test_context (&ctx, "videotestsrc", N_BUFFERS); - bus = gst_element_get_bus (ctx.pipe); - - test_file_name = make_test_file_name (); - g_object_set (ctx.icbin, "location", test_file_name, NULL); - - encoder = gst_element_factory_make ("pngenc", NULL); - g_object_set (ctx.icbin, "image-encoder", encoder, NULL); - - encoder = gst_element_factory_make ("identity", NULL); - g_object_set (ctx.icbin, "image-muxer", encoder, NULL); - - fail_if (gst_element_set_state (ctx.pipe, GST_STATE_PLAYING) == - GST_STATE_CHANGE_FAILURE); - - msg = gst_bus_timed_pop_filtered (bus, GST_SECOND * 10, - GST_MESSAGE_EOS | GST_MESSAGE_ERROR); - fail_unless (msg != NULL); - fail_unless (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_EOS); - - /* check there are N_BUFFERS files */ - for (i = 0; i < N_BUFFERS; i++) { - gchar *filename; - FILE *f; - - filename = g_strdup_printf (test_file_name, i); - - fail_unless (g_file_test (filename, G_FILE_TEST_EXISTS)); - fail_unless (g_file_test (filename, G_FILE_TEST_IS_REGULAR)); - fail_if (g_file_test (filename, G_FILE_TEST_IS_SYMLINK)); - - /* check the file isn't empty */ - f = fopen (filename, "r"); - fseek (f, 0, SEEK_END); - fail_unless (ftell (f) > 0); - fclose (f); - - g_free (filename); - } - - gstimagecapturebin_unset_test_context (&ctx); - 
gst_object_unref (bus); - g_free (test_file_name); -} - -GST_END_TEST; - -static Suite * -imagecapturebin_suite (void) -{ - GstElementFactory *jpegenc_factory; - - Suite *s = suite_create ("imagecapturebin"); - TCase *tc_chain = tcase_create ("general"); - - jpegenc_factory = gst_element_factory_find ("jpegenc"); - - suite_add_tcase (s, tc_chain); - if (jpegenc_factory) { - tcase_add_test (tc_chain, test_simple_capture); - - /* only adds this test if jpegenc contains the fix for its getcaps - * The fix on good: dcbba0932dc579abd6aab4460fa1a416374eda1b */ - if (gst_plugin_feature_check_version ((GstPluginFeature *) jpegenc_factory, - 0, 10, 27)) - tcase_add_test (tc_chain, test_multiple_captures_different_caps); - else - GST_WARNING ("Skipped test that needs gst-plugins-good 0.10.27"); - - tcase_add_test (tc_chain, test_setting_encoder); - tcase_add_test (tc_chain, test_setting_muxer); - } else - GST_WARNING ("Skipped imagecapturebin tests because jpegenc is missing"); - - return s; -} - -GST_CHECK_MAIN (imagecapturebin); diff --git a/tests/check/libs/h264parser.c b/tests/check/libs/h264parser.c new file mode 100644 index 0000000..3bc8c49 --- /dev/null +++ b/tests/check/libs/h264parser.c @@ -0,0 +1,182 @@ +/* Gstreamer + * Copyright (C) <2011> Intel Corporation + * Copyright (C) <2011> Collabora Ltd. + * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. 
+ * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ +#include <gst/check/gstcheck.h> +#include <gst/codecparsers/gsth264parser.h> + +static guint8 slice_dpa[] = { + 0x00, 0x00, 0x01, 0x02, 0x00, 0x02, 0x01, 0x03, 0x00, + 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, 0x09, 0x00, 0x0a, 0x00, + 0x0b, 0x00, 0x0c, 0x00, 0x0d, 0x00, 0x0e, 0x00, 0x0f, 0x00, 0x10, 0x00, + 0x11, 0x00, 0x12, 0x00, 0x13, 0x00, 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, + 0x17, 0x00, 0x18, 0x00, 0x19, 0x00, 0x1a, 0x00, 0x1b, 0x00, 0x1c, 0x00, + 0x1d, 0x00, 0x1e, 0x00, 0x1f, 0x00, 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, + 0x23, 0x00, 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27, 0x00, 0x28, 0x00, + 0x29, 0x00, 0x2a, 0x00, 0x2b, 0x00, 0x2c, 0x00, 0x2d, 0x00, 0x2e, 0x00, + 0x2f, 0x00, 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33, 0x00, 0x34, 0x00, + 0x35, 0x00, 0x36, 0x00, 0x37, 0x00, 0x38, 0x00, 0x39, 0x00, 0x3a, 0x00, + 0x3b, 0x00, 0x3c, 0x00, 0x3d, 0x00, 0x3e, 0x00, 0x3f, 0x00, 0x40, 0x00, + 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, + 0x47, 0x00, 0x48, 0x00, 0x49, 0x00, 0x4a, 0x00, 0x4b, 0x00, 0x4c, 0x00, + 0x4d, 0x00, 0x4e, 0x00, 0x4f, 0x00, 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, + 0x53, 0x00, 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00, 0x58, 0x00, + 0x59, 0x00, 0x5a, 0x00, 0x5b, 0x00, 0x5c, 0x00, 0x5d, 0x00, 0x5e, 0x00, + 0x5f, 0x00, 0x60, 0x00, 0x61, 0x01, 0x04, 0x00, 0xc4, 0x00, 0xa6, 0x00, + 0xc5, 0x00, 0xab, 0x00, 0x82, 0x00, 0xc2, 0x00, 0xd8, 0x00, 0xc6, 0x00, + 0xe4, 0x00, 0xbe, 0x00, 0xb0, 0x00, 0xe6, 0x00, 0xb6, 0x00, 0xb7, 0x00, + 0xb4, 0x00, 0xb5, 0x00, 0x87, 0x00, 0xb2, 0x00, 0xb3, 0x00, 0xd9, 0x00, + 0x8c, 0x00, 0xe5, 0x00, 0xbf, 0x00, 0xb1, 0x00, 0xe7, 0x00, 0xbb, 0x00, + 0xa3, 0x00, 0x84, 0x00, 0x85, 0x00, 0xbd, 0x00, 0x96, 0x00, 0xe8, 0x00, + 0x86, 0x00, 0x8e, 0x00, 0x8b, 0x00, 0x9d, 
0x00, 0xa9, 0x00, 0x8a, 0x01, + 0x05, 0x00, 0x83, 0x00, 0xf2, 0x00, 0xf3, 0x00, 0x8d, 0x00, 0x97, 0x00, + 0x88, 0x00, 0xde, 0x00, 0xf1, 0x00, 0x9e, 0x00, 0xaa, 0x00, 0xf5, 0x00, + 0xf4, 0x00, 0xf6, 0x00, 0xa2, 0x00, 0xad, 0x00, 0xc9, 0x00, 0xc7, 0x00, + 0xae, 0x00, 0x62, 0x00, 0x63, 0x00, 0x90, 0x00, 0x64, 0x00, 0xcb, 0x00, + 0x65, 0x00, 0xc8, 0x00, 0xca, 0x00, 0xcf, 0x00, 0xcc, 0x00, 0xcd, 0x00, + 0xce, 0x00, 0xe9, 0x00, 0x66, 0x00, 0xd3, 0x00, 0xd0, 0x00, 0xd1, 0x00, + 0xaf, 0x00, 0x67, 0x00, 0x91, 0x00, 0xd6, 0x00, 0xd4, 0x00, 0xd5, 0x00, + 0x68, 0x00, 0xeb, 0x00, 0xed, 0x00, 0x89, 0x00, 0x6a, 0x00, 0x69, 0x00, + 0x6b, 0x00, 0x6d, 0x00, 0x6c, 0x00, 0x6e, 0x00, 0xa0, 0x00, 0x6f, 0x00, + 0x71, 0x00, 0x70, 0x00, 0x72, 0x00, 0x73, 0x00, 0x75, 0x00, 0x74, 0x00, + 0x76, 0x00, 0x77, 0x00, 0xea, 0x00, 0x78, 0x00, 0x7a, 0x00, 0x79, 0x00, + 0x7b, 0x00, 0x7d, 0x00, 0x7c, 0x00, 0xa1, 0x00, 0x7f, 0x00, 0x7e, 0x00, + 0x80, 0x00, 0x81, 0x00, 0xec, 0x00, 0xee, 0x00, 0xba, 0x01, 0x06, 0x00, + 0xef, 0x00, 0xe1, 0x00, 0xe0, 0x00, 0xdc, 0x01, 0x07, 0x01, 0x08, 0x01, + 0x09, 0x01, 0x0a, 0x01, 0x0b, 0x01, 0x0c, 0x00, 0xdb, 0x00, 0xe2, 0x01, + 0x0d, 0x01, 0x0e, 0x01, 0x0f, 0x01, 0x10, 0x01, 0x11, 0x01, 0x12, 0x00, + 0xdf, 0x01, 0x13, 0x01, 0x14, 0x01, 0x15, 0x01, 0x16, 0x01, 0x17, 0x00, + 0xfd, 0x00, 0xff, 0x01, 0x18, 0x01, 0x19, 0x01, 0x1a, 0x01, 0x1b, 0x01, + 0x1c, 0x01, 0x1d, 0x01, 0x1e, 0x01, 0x1f, 0x01, 0x20, 0x01, 0x21, 0x01, + 0x22, 0x01, 0x23, 0x01, 0x24, 0x01, 0x25, 0x01, 0x26, 0x00, 0xfe, 0x01, + 0x00, 0x01, 0x27, 0x01, 0x28, 0x01, 0x29, 0x01, 0x2a, 0x01, 0x2b, 0x01, + 0x2c, 0x01, 0x2d, 0x01, 0x2e, 0x01, 0x2f, 0x01, 0x30, 0x01, 0x31, 0x00, + 0xe3, 0x00, 0xd7, 0x01, 0x32, 0x00, 0xf8, 0x00, 0xf9, 0x01, 0x33, 0x01, + 0x34, 0x01, 0x35, 0x01, 0x36, 0x01, 0x37, 0x01, 0x38, 0x01, 0x39, 0x01, + 0x3a, 0x01, 0x3b, 0x01, 0x3c, 0x01, 0x3d, 0x01, 0x3e, 0x01, 0x3f, 0x01, + 0x40, 0x01, 0x41, 0x01, 0x42, 0x01, 0x43, 0x01, 0x44, 0x01, 0x45, 0x01, + 0x46, 0x01, 0x47, 0x01, 0x48, 0x01, 0x49, 
0x01, 0x4a, 0x01, 0x4b, 0x01, + 0x4c, 0x00, 0x08, 0x05, 0x2e, 0x6e, 0x75, 0x6c, 0x6c, 0x0c, 0x76, 0x69, + 0x73, 0x69, 0x62, 0x6c, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x04, 0x45, + 0x75, 0x72, 0x6f, 0x06, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x6e, 0x0a, 0x62, + 0x75, 0x6c, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x74, 0x68, 0x06, 0x53, 0x61, + 0x63, 0x75, 0x74, 0x65, 0x06, 0x54, 0x63, 0x61, 0x72, 0x6f, 0x6e, 0x06, + 0x5a, 0x61, 0x63, 0x75, 0x74, 0x65, 0x06, 0x73, 0x61, 0x63, 0x75, 0x74, + 0x65, 0x06, 0x74, 0x63, 0x61, 0x72, 0x6f, 0x6e, 0x06, 0x7a, 0x61, 0x63, + 0x75, 0x74, 0x65, 0x07, 0x41, 0x6f, 0x67, 0x6f, 0x6e, 0x65, 0x6b, 0x07, + 0x61, 0x6f, 0x67, 0x6f, 0x6e, 0x65, 0x6b, 0x0c, 0x73, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x0c, 0x53, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x0a, 0x5a, 0x64, + 0x6f, 0x74, 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x06, 0x4c, 0x63, 0x61, + 0x72, 0x6f, 0x6e, 0x06, 0x6c, 0x63, 0x61, 0x72, 0x6f, 0x6e, 0x0a, 0x7a, + 0x64, 0x6f, 0x74, 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x06, 0x52, 0x61, + 0x63, 0x75, 0x74, 0x65, 0x06, 0x41, 0x62, 0x72, 0x65, 0x76, 0x65, 0x06, + 0x4c, 0x61, 0x63, 0x75, 0x74, 0x65, 0x07, 0x45, 0x6f, 0x67, 0x6f, 0x6e, + 0x65, 0x6b, 0x06, 0x45, 0x63, 0x61, 0x72, 0x6f, 0x6e, 0x06, 0x44, 0x63, + 0x61, 0x72, 0x6f, 0x6e, 0x07, 0x44, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x6e, + 0x06, 0x4e, 0x61, 0x63, 0x75, 0x74, 0x65, 0x06, 0x4e, 0x63, 0x61, 0x72, + 0x6f, 0x6e, 0x0d, 0x4f, 0x68, 0x75, 0x6e, 0x67, 0x61, 0x72, 0x75, 0x6d, + 0x6c, 0x61, 0x75, 0x74, 0x06, 0x52, 0x63, 0x61, 0x72, 0x6f, 0x6e, 0x05, + 0x55, 0x72, 0x69, 0x6e, 0x67, 0x09, 0x6e, 0x75, 0x6e, 0x67, 0x61, 0x64, + 0x65, 0x73, 0x68, 0x0d, 0x55, 0x68, 0x75, 0x6e, 0x67, 0x61, 0x72, 0x75, + 0x6d, 0x6c, 0x61, 0x75, 0x74, 0x0c, 0x54, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x06, 0x72, 0x61, 0x63, 0x75, 0x74, + 0x65, 0x06, 0x61, 0x62, 0x72, 0x65, 0x76, 0x65, 0x06, 0x6c, 0x61, 0x63, + 0x75, 0x74, 0x65, 0x07, 0x65, 0x6f, 0x67, 
0x6f, 0x6e, 0x65, 0x6b, 0x06, + 0x65, 0x63, 0x61, 0x72, 0x6f, 0x6e, 0x06, 0x64, 0x63, 0x61, 0x72, 0x6f, + 0x6e, 0x07, 0x64, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x6e, 0x06, 0x6e, 0x61, + 0x63, 0x75, 0x74, 0x65, 0x06, 0x6e, 0x63, 0x61, 0x72, 0x6f, 0x6e, 0x0d, + 0x6f, 0x68, 0x75, 0x6e, 0x67, 0x61, 0x72, 0x75, 0x6d, 0x6c, 0x61, 0x75, + 0x74, 0x06, 0x72, 0x63, 0x61, 0x72, 0x6f, 0x6e, 0x05, 0x75, 0x72, 0x69, + 0x6e, 0x67, 0x0d, 0x75, 0x68, 0x75, 0x6e, 0x67, 0x61, 0x72, 0x75, 0x6d, + 0x6c, 0x61, 0x75, 0x74, 0x0c, 0x74, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x61, + 0x63, 0x63, 0x65, 0x6e, 0x74, 0x0a, 0x49, 0x64, 0x6f, 0x74, 0x61, 0x63, + 0x63, 0x65, 0x6e, 0x74, 0x0c, 0x52, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x61, + 0x63, 0x63, 0x65, 0x6e, 0x74, 0x0c, 0x72, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x07, 0x49, 0x6f, 0x67, 0x6f, 0x6e, + 0x65, 0x6b, 0x07, 0x41, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x6e, 0x07, 0x45, + 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x6e, 0x0a, 0x45, 0x64, 0x6f, 0x74, 0x61, + 0x63, 0x63, 0x65, 0x6e, 0x74, 0x0c, 0x47, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x0c, 0x4b, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x07, 0x49, 0x6d, 0x61, 0x63, + 0x72, 0x6f, 0x6e, 0x0c, 0x4c, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x61, 0x63, + 0x63, 0x65, 0x6e, 0x74, 0x0c, 0x4e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x61, + 0x63, 0x63, 0x65, 0x6e, 0x74, 0x07, 0x4f, 0x6d, 0x61, 0x63, 0x72, 0x6f, + 0x6e, 0x07, 0x55, 0x6f, 0x67, 0x6f, 0x6e, 0x65, 0x6b, 0x07, 0x55, 0x6d, + 0x61, 0x63, 0x72, 0x6f, 0x6e, 0x07, 0x69, 0x6f, 0x67, 0x6f, 0x6e, 0x65, + 0x6b, 0x07, 0x61, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x6e, 0x07, 0x65, 0x6d, + 0x61, 0x63, 0x72, 0x6f, 0x6e, 0x0a, 0x65, 0x64, 0x6f, 0x74, 0x61, 0x63, + 0x63, 0x65, 0x6e, 0x74, 0x0c, 0x67, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x61, + 0x63, 0x63, 0x65, 0x6e, 0x74, 0x0c, 0x6b, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x61, 0x63, 0x63, 0x65, 0x6e, 0x74, 0x07, 0x69, 0x6d, 0x61, 0x63, 0x72, + 0x6f, 0x6e, 0x0c, 0x6c, 0x63, 0x6f, 0x6d, 
0x6d, 0x61, 0x61, 0x63, 0x63, + 0x65, 0x6e, 0x74, 0x0c, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x61, 0x63, + 0x63, 0x65, 0x6e, 0x74, 0x07, 0x6f, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x6e, + 0x07, 0x75, 0x6f, 0x67, 0x6f, 0x6e, 0x65, 0x6b, 0x07, 0x75, 0x6d, 0x61, + 0x63, 0x72, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 +}; + +GST_START_TEST (test_h264_parse_slice_dpa) +{ + GstH264ParserResult res; + GstH264NalUnit nalu; + + GstH264NalParser *parser = gst_h264_nal_parser_new (); + + res = gst_h264_parser_identify_nalu (parser, slice_dpa, 0, + sizeof (slice_dpa), &nalu); + + assert_equals_int (res, GST_H264_PARSER_OK); + assert_equals_int (nalu.type, GST_H264_NAL_SLICE_DPA); + + g_free (parser); +} + +GST_END_TEST; + +static Suite * +h264parser_suite (void) +{ + Suite *s = suite_create ("H264 Parser library"); + + TCase *tc_chain = tcase_create ("general"); + + suite_add_tcase (s, tc_chain); + tcase_add_test (tc_chain, test_h264_parse_slice_dpa); + + return s; +} + +int +main (int argc, char **argv) +{ + int nf; + + Suite *s = h264parser_suite (); + + SRunner *sr = srunner_create (s); + + gst_check_init (&argc, &argv); + + srunner_run_all (sr, CK_NORMAL); + nf = srunner_ntests_failed (sr); + srunner_free (sr); + + return nf; +} diff --git a/tests/examples/camerabin2/gst-camerabin2-test.c b/tests/examples/camerabin2/gst-camerabin2-test.c index 49974d1..8ca5013 100644 --- a/tests/examples/camerabin2/gst-camerabin2-test.c +++ b/tests/examples/camerabin2/gst-camerabin2-test.c @@ -157,8 +157,8 @@ static gint color_mode = COLOR_TONE_MODE_NONE; static gchar *viewfinder_filter = NULL; -static int x_width = 320; -static int x_height = 240; +static int x_width = 864; +static int x_height = 480; /* test configuration for common callbacks */ static GString *filename = NULL; @@ -435,6 +435,17 @@ setup_pipeline_element (GstElement * element, const gchar * property_name, if (g_object_class_find_property (G_OBJECT_GET_CLASS (elem), "device")) { g_object_set (elem, "device", "/dev/video1", 
NULL); } + if (g_object_class_find_property (G_OBJECT_GET_CLASS (elem), + "queue-size")) { + g_object_set (elem, "queue-size", 15, NULL); + } + if (g_object_class_find_property (G_OBJECT_GET_CLASS (elem), "sync")) { + g_object_set (elem, "sync", FALSE, NULL); + } + if (g_object_class_find_property (G_OBJECT_GET_CLASS (elem), + "enable-last-buffer")) { + g_object_set (elem, "enable-last-buffer", FALSE, NULL); + } g_object_set (element, property_name, elem, NULL); } else { GST_WARNING ("can't create element '%s' for property '%s'", element_name, @@ -456,6 +467,7 @@ setup_pipeline (void) gboolean res = TRUE; GstBus *bus; GstElement *sink = NULL, *ipp = NULL; + GstElement *stride; GstEncodingProfile *prof = NULL; camerabin = gst_element_factory_make ("camerabin2", NULL); if (NULL == camerabin) { @@ -463,6 +475,9 @@ setup_pipeline (void) goto error; } + stride = gst_element_factory_make ("stridetransform", "capsfilter-stride"); + g_object_set (camerabin, "preview-filter", stride, NULL); + bus = gst_pipeline_get_bus (GST_PIPELINE (camerabin)); /* Add sync handler for time critical messages that need to be handled fast */ gst_bus_set_sync_handler (bus, sync_bus_callback, NULL); @@ -507,8 +522,10 @@ setup_pipeline (void) GST_INFO_OBJECT (camerabin, "elements created"); +#if 0 if (sink) g_object_set (sink, "sync", TRUE, NULL); +#endif GST_INFO_OBJECT (camerabin, "elements configured"); @@ -527,29 +544,30 @@ setup_pipeline (void) "height", G_TYPE_INT, image_height, "framerate", GST_TYPE_FRACTION, view_framerate_num, view_framerate_den, NULL), NULL); - else - caps = gst_caps_new_full (gst_structure_new ("video/x-raw-yuv", + else { + caps = gst_caps_new_full (gst_structure_new ("video/x-raw-yuv-strided", "width", G_TYPE_INT, image_width, "height", G_TYPE_INT, image_height, NULL), gst_structure_new ("video/x-raw-rgb", "width", G_TYPE_INT, image_width, "height", G_TYPE_INT, image_height, NULL), NULL); + } - g_object_set (camerabin, "video-capture-caps", caps, NULL); + 
//g_object_set (camerabin, "video-capture-caps", caps, NULL); + g_object_set (camerabin, "viewfinder-caps", caps, NULL); gst_caps_unref (caps); } else { - GstCaps *caps = gst_caps_new_full (gst_structure_new ("video/x-raw-yuv", + GstCaps *caps = gst_caps_new_full (gst_structure_new ("image/jpeg", "width", G_TYPE_INT, image_width, "height", G_TYPE_INT, image_height, NULL), - gst_structure_new ("video/x-raw-rgb", - "width", G_TYPE_INT, image_width, - "height", G_TYPE_INT, image_height, NULL), NULL); - + NULL); g_object_set (camerabin, "image-capture-caps", caps, NULL); gst_caps_unref (caps); } } + g_object_set (camerabin, "mode", mode, NULL); + if (GST_STATE_CHANGE_FAILURE == gst_element_set_state (camerabin, GST_STATE_READY)) { g_warning ("can't set camerabin to ready\n"); @@ -758,13 +776,13 @@ main (int argc, char *argv[]) {"view-framerate-den", '\0', 0, G_OPTION_ARG_INT, &view_framerate_den, "Framerate denominator for viewfinder", NULL}, {"preview-caps", '\0', 0, G_OPTION_ARG_STRING, &preview_caps_name, - "Preview caps (e.g. video/x-raw-rgb,width=320,height=240)", NULL}, + "Preview caps (e.g. video/x-raw-rgb,width=864,height=480)", NULL}, {"viewfinder-filter", '\0', 0, G_OPTION_ARG_STRING, &viewfinder_filter, "Filter to process all frames going to viewfinder sink", NULL}, {"x-width", '\0', 0, G_OPTION_ARG_INT, &x_width, - "X window width (default = 320)", NULL}, + "X window width (default = 864)", NULL}, {"x-height", '\0', 0, G_OPTION_ARG_INT, &x_height, - "X window height (default = 240)", NULL}, + "X window height (default = 480)", NULL}, {"no-xwindow", '\0', 0, G_OPTION_ARG_NONE, &no_xwindow, "Do not create XWindow", NULL}, {"encoding-target", '\0', 0, G_OPTION_ARG_STRING, &gep_targetname,