The GStreamer backend now uses the GStreamer playbin2 element to
create the pipeline automatically. The code is simpler and the
specific, hand-built pipelines are no longer needed.

SVN revision: 53649
Vincent Torri 2010-10-20 07:59:20 +00:00
parent 2d28628c95
commit 3382c0d596
10 changed files with 414 additions and 1248 deletions
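
For context, the pattern this commit adopts: instead of hand-assembling
filesrc/decodebin/sink chains per media type, the backend now hands playbin2 a
URI and lets it build the decoding pipeline itself. A minimal, self-contained
sketch of that usage (GStreamer 0.10 API; the URI and element names below are
illustrative, not taken from the emotion sources):

#include <gst/gst.h>

int
main(int argc, char **argv)
{
   GstElement *playbin;
   GstElement *vsink;
   gint n_audio = 0;
   gint n_video = 0;

   gst_init(&argc, &argv);

   /* playbin2 picks the source, demuxer and decoders automatically */
   playbin = gst_element_factory_make("playbin2", "pipeline");
   if (!playbin) return 1;

   /* supply a custom video sink instead of building a whole pipeline */
   vsink = gst_element_factory_make("fakesink", "videosink");
   g_object_set(G_OBJECT(vsink), "sync", TRUE, NULL);
   g_object_set(G_OBJECT(playbin), "video-sink", vsink, NULL);

   /* playbin2 takes URIs only; local paths go through g_filename_to_uri() */
   g_object_set(G_OBJECT(playbin), "uri", "file:///tmp/test.ogv", NULL);

   /* volume and mute are plain properties, no named volume element needed */
   g_object_set(G_OBJECT(playbin), "volume", 0.8, NULL);

   /* pausing negotiates caps, so the streams can then be counted */
   gst_element_set_state(playbin, GST_STATE_PAUSED);
   gst_element_get_state(playbin, NULL, NULL, GST_CLOCK_TIME_NONE);
   g_object_get(G_OBJECT(playbin), "n-audio", &n_audio,
                "n-video", &n_video, NULL);
   g_print("%d audio / %d video stream(s)\n", n_audio, n_video);

   gst_element_set_state(playbin, GST_STATE_NULL);
   gst_object_unref(playbin);
   return 0;
}

Once paused, per-stream details (caps, duration) are available through
playbin2's get-video-pad / get-audio-pad signals, which is exactly what the
new _emotion_pipeline_build() in this diff does.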

View File

@ -20,12 +20,7 @@ pkg_LTLIBRARIES = gstreamer.la
gstreamer_la_SOURCES = \
emotion_gstreamer.c \
emotion_gstreamer_pipeline.c \
emotion_gstreamer_pipeline.h \
emotion_gstreamer_pipeline_cdda.c \
emotion_gstreamer_pipeline_dvd.c \
emotion_gstreamer_pipeline_file.c \
emotion_gstreamer_pipeline_uri.c \
emotion_gstreamer_pipeline_v4l.c
emotion_gstreamer_pipeline.h
gstreamer_la_LIBADD = @GSTREAMER_LIBS@ $(top_builddir)/src/lib/libemotion.la
gstreamer_la_LDFLAGS = -no-undefined @lt_enable_auto_import@ -module -avoid-version
gstreamer_la_LIBTOOLFLAGS = --tag=disable-static

View File

@ -233,8 +233,8 @@ static Emotion_Video_Module em_module =
};
static unsigned char
em_init(Evas_Object *obj,
void **emotion_video,
Emotion_Module_Options *opt __UNUSED__)
{
Emotion_Gstreamer_Video *ev;
@ -255,8 +255,6 @@ em_init(Evas_Object *obj,
/* Default values */
ev->ratio = 1.0;
ev->video_sink_nbr = 0;
ev->audio_sink_nbr = 0;
ev->vis = EMOTION_VIS_NONE;
ev->volume = 0.8;
@ -279,8 +277,8 @@ static int
em_shutdown(void *video)
{
Emotion_Gstreamer_Video *ev;
Emotion_Audio_Sink *asink;
Emotion_Video_Sink *vsink;
Emotion_Audio_Stream *astream;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
if (!ev)
@ -291,10 +289,12 @@ em_shutdown(void *video)
/* FIXME: and the evas object ? */
if (ev->obj_data) free(ev->obj_data);
EINA_LIST_FREE(ev->audio_sinks, asink)
free(asink);
EINA_LIST_FREE(ev->video_sinks, vsink)
free(vsink);
EINA_LIST_FREE(ev->audio_streams, astream)
free(astream);
EINA_LIST_FREE(ev->video_streams, vstream)
free(vstream);
gst_deinit();
free(ev);
@ -311,127 +311,37 @@ em_file_open(const char *file,
ev = (Emotion_Gstreamer_Video *)video;
ev->pipeline = gst_pipeline_new("pipeline");
if (!ev->pipeline)
return 0;
ev->eos_bus = gst_pipeline_get_bus(GST_PIPELINE(ev->pipeline));
if (!ev->eos_bus)
{
gst_object_unref(ev->pipeline);
return 0;
}
if (!_emotion_pipeline_build(ev, file))
return EINA_FALSE;
/* Evas Object */
ev->obj = obj;
/* CD Audio */
if (strstr(file, "cdda://"))
{
const char *device = NULL;
unsigned int track = 1;
device = file + strlen("cdda://");
if (device[0] == '/')
{
char *tmp;
if ((tmp = strchr(device, '?')) || (tmp = strchr(device, '#')))
{
sscanf(tmp + 1, "%d", &track);
tmp[0] = '\0';
}
}
else
{
device = NULL;
sscanf(file, "cdda://%d", &track);
}
DBG("Build CD Audio pipeline");
if (!(emotion_pipeline_cdda_build(ev, device, track)))
{
ERR("Could not build CD Audio pipeline");
gst_object_unref(ev->pipeline);
return 0;
}
}
/* Dvd */
else if (strstr(file, "dvd://"))
{
DBG("Build DVD pipeline");
if (!(emotion_pipeline_dvd_build(ev, NULL)))
{
ERR("Could not build DVD pipeline");
gst_object_unref(ev->pipeline);
return 0;
}
}
/* http */
else if (strstr(file, "http://"))
{
DBG("Build URI pipeline");
if (!(emotion_pipeline_uri_build(ev, file)))
{
ERR("Could not build URI pipeline");
gst_object_unref(ev->pipeline);
return 0;
}
}
/* v4l */
else if (strstr(file, "v4l://"))
{
DBG("Build V4L pipeline");
if (!(emotion_pipeline_v4l_build(ev, file)))
{
ERR("Could not build V4L pipeline");
gst_object_unref(ev->pipeline);
return 0;
}
}
/* Normal media file */
else
{
const char *filename;
filename = strstr(file, "file://")
? file + strlen("file://")
: file;
DBG("Build file pipeline");
if (!(emotion_pipeline_file_build(ev, filename)))
{
ERR("Could not build File pipeline");
gst_object_unref(ev->pipeline);
return 0;
}
}
ev->position = 0.0;
{
/* to recap: */
Emotion_Video_Sink *vsink;
Emotion_Audio_Sink *asink;
{
/* to recap: */
Emotion_Video_Stream *vstream;
Emotion_Audio_Stream *astream;
vsink = (Emotion_Video_Sink *)eina_list_data_get(ev->video_sinks);
if (vsink)
{
DBG("video size=%dx%d, fps=%d/%d, "
"fourcc=%"GST_FOURCC_FORMAT", length=%"GST_TIME_FORMAT,
vsink->width, vsink->height, vsink->fps_num, vsink->fps_den,
GST_FOURCC_ARGS(vsink->fourcc),
GST_TIME_ARGS((guint64)(vsink->length_time * GST_SECOND)));
}
vstream = (Emotion_Video_Stream *)eina_list_data_get(ev->video_streams);
if (vstream)
{
DBG("video size=%dx%d, fps=%d/%d, "
"fourcc=%"GST_FOURCC_FORMAT", length=%"GST_TIME_FORMAT,
vstream->width, vstream->height, vstream->fps_num, vstream->fps_den,
GST_FOURCC_ARGS(vstream->fourcc),
GST_TIME_ARGS((guint64)(vstream->length_time * GST_SECOND)));
}
asink = (Emotion_Audio_Sink *)eina_list_data_get(ev->audio_sinks);
if (asink)
{
DBG("audio channels=%d, rate=%d, length=%"GST_TIME_FORMAT,
asink->channels, asink->samplerate,
GST_TIME_ARGS((guint64)(asink->length_time * GST_SECOND)));
}
}
astream = (Emotion_Audio_Stream *)eina_list_data_get(ev->audio_streams);
if (astream)
{
DBG("audio channels=%d, rate=%d, length=%"GST_TIME_FORMAT,
astream->channels, astream->samplerate,
GST_TIME_ARGS((guint64)(astream->length_time * GST_SECOND)));
}
}
if (ev->metadata)
_free_metadata(ev->metadata);
@ -449,18 +359,18 @@ static void
em_file_close(void *video)
{
Emotion_Gstreamer_Video *ev;
Emotion_Audio_Sink *asink;
Emotion_Video_Sink *vsink;
Emotion_Audio_Stream *astream;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
if (!ev)
return;
/* we clear the sink lists */
EINA_LIST_FREE(ev->audio_sinks, asink)
free(asink);
EINA_LIST_FREE(ev->video_sinks, vsink)
free(vsink);
/* we clear the stream lists */
EINA_LIST_FREE(ev->audio_streams, astream)
free(astream);
EINA_LIST_FREE(ev->video_streams, vstream)
free(vstream);
/* shutdown eos */
if (ev->eos_timer)
@ -528,15 +438,15 @@ em_size_get(void *video,
int *height)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
vsink = (Emotion_Video_Sink *)eina_list_nth(ev->video_sinks, ev->video_sink_nbr);
if (vsink)
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
{
if (width) *width = vsink->width;
if (height) *height = vsink->height;
if (width) *width = vstream->width;
if (height) *height = vstream->height;
}
else
{
@ -550,17 +460,19 @@ em_pos_set(void *video,
double pos)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Audio_Sink *asink;
GstElement *vsink;
GstElement *asink;
ev = (Emotion_Gstreamer_Video *)video;
vsink = (Emotion_Video_Sink *)eina_list_nth(ev->video_sinks, ev->video_sink_nbr);
asink = (Emotion_Audio_Sink *)eina_list_nth(ev->audio_sinks, ev->audio_sink_nbr);
g_object_get (G_OBJECT (ev->pipeline),
"video-sink", &vsink,
"audio-sink", &asink,
NULL);
if (vsink)
{
gst_element_seek(vsink->sink, 1.0,
gst_element_seek(vsink, 1.0,
GST_FORMAT_TIME,
GST_SEEK_FLAG_ACCURATE | GST_SEEK_FLAG_FLUSH,
GST_SEEK_TYPE_SET,
@ -569,7 +481,7 @@ em_pos_set(void *video,
}
if (asink)
{
gst_element_seek(asink->sink, 1.0,
gst_element_seek(asink, 1.0,
GST_FORMAT_TIME,
GST_SEEK_FLAG_ACCURATE | GST_SEEK_FLAG_FLUSH,
GST_SEEK_TYPE_SET,
@ -582,8 +494,8 @@ static double
em_len_get(void *video)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Audio_Sink *asink;
Emotion_Video_Stream *vstream;
Emotion_Audio_Stream *astream;
Eina_List *l;
GstFormat fmt;
gint64 val;
@ -608,13 +520,13 @@ em_len_get(void *video)
return val / 1000000000.0;
fallback:
EINA_LIST_FOREACH(ev->audio_sinks, l, asink)
if (asink->length_time >= 0)
return asink->length_time;
EINA_LIST_FOREACH(ev->audio_streams, l, astream)
if (astream->length_time >= 0)
return astream->length_time;
EINA_LIST_FOREACH(ev->video_sinks, l, vsink)
if (vsink->length_time >= 0)
return vsink->length_time;
EINA_LIST_FOREACH(ev->video_streams, l, vstream)
if (vstream->length_time >= 0)
return vstream->length_time;
return 0.0;
}
@ -623,13 +535,13 @@ static int
em_fps_num_get(void *video)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
vsink = (Emotion_Video_Sink *)eina_list_nth(ev->video_sinks, ev->video_sink_nbr);
if (vsink)
return vsink->fps_num;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
return vstream->fps_num;
return 0;
}
@ -638,13 +550,13 @@ static int
em_fps_den_get(void *video)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
vsink = (Emotion_Video_Sink *)eina_list_nth(ev->video_sinks, ev->video_sink_nbr);
if (vsink)
return vsink->fps_den;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
return vstream->fps_den;
return 1;
}
@ -653,13 +565,13 @@ static double
em_fps_get(void *video)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
vsink = (Emotion_Video_Sink *)eina_list_nth(ev->video_sinks, ev->video_sink_nbr);
if (vsink)
return (double)vsink->fps_num / (double)vsink->fps_den;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
return (double)vstream->fps_num / (double)vstream->fps_den;
return 0.0;
}
@ -749,7 +661,7 @@ em_video_handled(void *video)
ev = (Emotion_Gstreamer_Video *)video;
if (!eina_list_count(ev->video_sinks))
if (!eina_list_count(ev->video_streams))
return 0;
return 1;
@ -762,7 +674,7 @@ em_audio_handled(void *video)
ev = (Emotion_Gstreamer_Video *)video;
if (!eina_list_count(ev->audio_sinks))
if (!eina_list_count(ev->audio_streams))
return 0;
return 1;
@ -783,14 +695,14 @@ static Emotion_Format
em_format_get(void *video)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
vsink = (Emotion_Video_Sink *)eina_list_nth(ev->video_sinks, ev->video_sink_nbr);
if (vsink)
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
{
switch (vsink->fourcc)
switch (vstream->fourcc)
{
case GST_MAKE_FOURCC('I', '4', '2', '0'):
return EMOTION_FORMAT_I420;
@ -811,15 +723,15 @@ static void
em_video_data_size_get(void *video, int *w, int *h)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
vsink = (Emotion_Video_Sink *)eina_list_nth(ev->video_sinks, ev->video_sink_nbr);
if (vsink)
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
{
*w = vsink->width;
*h = vsink->height;
*w = vstream->width;
*h = vstream->height;
}
else
{
@ -912,7 +824,7 @@ em_video_channel_count(void *video)
ev = (Emotion_Gstreamer_Video *)video;
return eina_list_count(ev->video_sinks);
return eina_list_count(ev->video_streams);
}
static void
@ -936,7 +848,7 @@ em_video_channel_get(void *video)
ev = (Emotion_Gstreamer_Video *)video;
return ev->video_sink_nbr;
return ev->video_stream_nbr;
}
static const char *
@ -976,7 +888,7 @@ em_audio_channel_count(void *video)
ev = (Emotion_Gstreamer_Video *)video;
return eina_list_count(ev->audio_sinks);
return eina_list_count(ev->audio_streams);
}
static void
@ -1000,7 +912,7 @@ em_audio_channel_get(void *video)
ev = (Emotion_Gstreamer_Video *)video;
return ev->audio_sink_nbr;
return ev->audio_stream_nbr;
}
static const char *
@ -1015,7 +927,6 @@ em_audio_channel_mute_set(void *video,
int mute)
{
Emotion_Gstreamer_Video *ev;
GstElement *volume;
ev = (Emotion_Gstreamer_Video *)video;
@ -1023,15 +934,11 @@ em_audio_channel_mute_set(void *video,
return;
ev->audio_mute = mute;
volume = gst_bin_get_by_name(GST_BIN(ev->pipeline), "volume");
if (!volume) return;
if (mute)
g_object_set(G_OBJECT(volume), "volume", 0.0, NULL);
g_object_set(G_OBJECT(ev->pipeline), "mute", 1, NULL);
else
g_object_set(G_OBJECT(volume), "volume", ev->volume, NULL);
gst_object_unref(volume);
g_object_set(G_OBJECT(ev->pipeline), "mute", 0, NULL);
}
static int
@ -1049,7 +956,6 @@ em_audio_channel_volume_set(void *video,
double vol)
{
Emotion_Gstreamer_Video *ev;
GstElement *volume;
ev = (Emotion_Gstreamer_Video *)video;
@ -1058,10 +964,7 @@ em_audio_channel_volume_set(void *video,
if (vol > 1.0)
vol = 1.0;
ev->volume = vol;
volume = gst_bin_get_by_name(GST_BIN(ev->pipeline), "volume");
if (!volume) return;
g_object_set(G_OBJECT(volume), "volume", vol, NULL);
gst_object_unref(volume);
g_object_set(G_OBJECT(ev->pipeline), "volume", vol, NULL);
}
static double
@ -1380,15 +1283,15 @@ static void
_em_buffer_read(void *data, void *buf, unsigned int nbyte __UNUSED__)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Sink *vsink;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)data;
_emotion_frame_new(ev->obj);
vsink = (Emotion_Video_Sink *)eina_list_nth(ev->video_sinks, ev->video_sink_nbr);
if (vsink)
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
{
_emotion_video_pos_update(ev->obj, ev->position, vsink->length_time);
_emotion_frame_resize(ev->obj, vsink->width, vsink->height, ev->ratio);
_emotion_video_pos_update(ev->obj, ev->position, vstream->length_time);
_emotion_frame_resize(ev->obj, vstream->width, vstream->height, ev->ratio);
}
}

View File

@ -12,11 +12,10 @@
#include "emotion_private.h"
typedef struct _Emotion_Video_Sink Emotion_Video_Sink;
typedef struct _Emotion_Video_Stream Emotion_Video_Stream;
struct _Emotion_Video_Sink
struct _Emotion_Video_Stream
{
GstElement *sink;
gdouble length_time;
gint width;
gint height;
@ -25,11 +24,10 @@ struct _Emotion_Video_Sink
guint32 fourcc;
};
typedef struct _Emotion_Audio_Sink Emotion_Audio_Sink;
typedef struct _Emotion_Audio_Stream Emotion_Audio_Stream;
struct _Emotion_Audio_Sink
struct _Emotion_Audio_Stream
{
GstElement *sink;
gdouble length_time;
gint channels;
gint samplerate;
@ -61,12 +59,12 @@ struct _Emotion_Gstreamer_Video
GstBus *eos_bus;
Ecore_Timer *eos_timer;
/* Sinks */
Eina_List *video_sinks;
Eina_List *audio_sinks;
/* Streams */
Eina_List *video_streams;
Eina_List *audio_streams;
int video_sink_nbr;
int audio_sink_nbr;
int video_stream_nbr;
int audio_stream_nbr;
/* Evas object */
Evas_Object *obj;
@ -86,10 +84,10 @@ struct _Emotion_Gstreamer_Video
Emotion_Vis vis;
unsigned char play : 1;
unsigned char play_started : 1;
unsigned char video_mute : 1;
unsigned char audio_mute : 1;
};
extern int _emotion_gstreamer_log_domain;

View File

@ -28,6 +28,292 @@ emotion_pipeline_pause(GstElement *pipeline)
return 1;
}
Eina_Bool
_emotion_pipeline_build(Emotion_Gstreamer_Video *ev, const char *file)
{
GstElement *sink;
int i;
ev->pipeline = gst_element_factory_make("playbin2", "pipeline");
if (!ev->pipeline)
{
ERR("could not create playbin2 element");
return EINA_FALSE;
}
ev->eos_bus = gst_pipeline_get_bus(GST_PIPELINE(ev->pipeline));
if (!ev->eos_bus)
{
ERR("could not create BUS");
goto unref_pipeline;
}
sink = gst_element_factory_make("fakesink", "videosink");
if (!sink)
{
ERR("could not create video sink");
goto unref_pipeline;
}
g_object_set(G_OBJECT(sink), "sync", TRUE, NULL);
g_object_set(G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
g_signal_connect(G_OBJECT(sink),
"handoff",
G_CALLBACK(cb_handoff), ev);
g_object_set(G_OBJECT(ev->pipeline), "video-sink", sink, NULL);
sink = gst_element_factory_make("autoaudiosink", "audiosink");
if (!sink)
{
ERR("could not create audio sink");
goto unref_pipeline;
}
g_object_set(G_OBJECT(ev->pipeline), "audio-sink", sink, NULL);
if ((*file == '/') || (*file == '~'))
{
char *uri;
uri = g_filename_to_uri(file, NULL, NULL);
if (uri)
{
DBG("Setting file %s\n", uri);
g_object_set(G_OBJECT(ev->pipeline), "uri", uri, NULL);
g_free(uri);
}
else
{
ERR("could not create new uri from %s", file);
goto unref_pipeline;
}
}
else
{
DBG("Setting file %s\n", file);
g_object_set(G_OBJECT(ev->pipeline), "uri", file, NULL);
}
if (!emotion_pipeline_pause(ev->pipeline))
goto unref_pipeline;
g_object_get(G_OBJECT(ev->pipeline),
"n-audio", &ev->audio_stream_nbr,
"n-video", &ev->video_stream_nbr,
NULL);
if ((ev->video_stream_nbr == 0) && (ev->audio_stream_nbr == 0))
{
ERR("No audio nor video stream found");
goto unref_pipeline;
}
/* Video streams */
for (i = 0; i < ev->video_stream_nbr; i++)
{
GstPad *pad;
GstCaps *caps;
GstStructure *structure;
GstQuery *query;
const GValue *val;
gchar *str;
Eina_Bool build_stream = EINA_FALSE;
gdouble length_time;
gint width;
gint height;
gint fps_num;
gint fps_den;
guint32 fourcc;
g_signal_emit_by_name(ev->pipeline, "get-video-pad", i, &pad);
if (!pad)
continue;
caps = gst_pad_get_negotiated_caps(pad);
if (!caps)
goto unref_pad_v;
structure = gst_caps_get_structure(caps, 0);
str = gst_caps_to_string(caps);
if (!gst_structure_get_int(structure, "width", &width))
goto unref_caps_v;
if (!gst_structure_get_int(structure, "height", &height))
goto unref_caps_v;
if (!gst_structure_get_fraction(structure, "framerate", &fps_num, &fps_den))
goto unref_caps_v;
if (g_str_has_prefix(str, "video/x-raw-yuv"))
{
val = gst_structure_get_value(structure, "format");
fourcc = gst_value_get_fourcc(val);
}
else if (g_str_has_prefix(str, "video/x-raw-rgb"))
fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
else
goto unref_caps_v;
query = gst_query_new_duration(GST_FORMAT_TIME);
if (gst_pad_peer_query(pad, query))
{
gint64 t;
gst_query_parse_duration(query, NULL, &t);
length_time = (double)t / (double)GST_SECOND;
}
else
goto unref_query_v;
build_stream = EINA_TRUE;
unref_query_v:
gst_query_unref(query);
unref_caps_v:
g_free(str); /* gst_caps_to_string() result must be freed */
gst_caps_unref(caps);
unref_pad_v:
gst_object_unref(pad);
if (build_stream)
{
Emotion_Video_Stream *vstream;
vstream = emotion_video_stream_new(ev);
if (!vstream) continue;
vstream->length_time = length_time;
vstream->width = width;
vstream->height = height;
vstream->fps_num = fps_num;
vstream->fps_den = fps_den;
vstream->fourcc = fourcc;
}
}
/* Audio streams */
for (i = 0; i < ev->audio_stream_nbr; i++)
{
GstPad *pad;
GstCaps *caps;
GstStructure *structure;
GstQuery *query;
Eina_Bool build_stream = EINA_FALSE;
gdouble length_time;
gint channels;
gint samplerate;
g_signal_emit_by_name(ev->pipeline, "get-audio-pad", i, &pad);
if (!pad)
continue;
caps = gst_pad_get_negotiated_caps(pad);
if (!caps)
goto unref_pad_a;
structure = gst_caps_get_structure(caps, 0);
if (!gst_structure_get_int(structure, "channels", &channels))
goto unref_caps_a;
if (!gst_structure_get_int(structure, "rate", &samplerate))
goto unref_caps_a;
query = gst_query_new_duration(GST_FORMAT_TIME);
if (gst_pad_peer_query(pad, query))
{
gint64 t;
gst_query_parse_duration(query, NULL, &t);
length_time = (double)t / (double)GST_SECOND;
}
else
goto unref_query_a;
build_stream = EINA_TRUE;
unref_query_a:
gst_query_unref(query);
unref_caps_a:
gst_caps_unref(caps);
unref_pad_a:
gst_object_unref(pad);
if (build_stream)
{
Emotion_Audio_Stream *astream;
astream = (Emotion_Audio_Stream *)calloc(1, sizeof(Emotion_Audio_Stream));
if (!astream) continue;
ev->audio_streams = eina_list_append(ev->audio_streams, astream);
if (eina_error_get())
{
free(astream);
continue;
}
astream->length_time = length_time;
astream->channels = channels;
astream->samplerate = samplerate;
}
}
/* Visualization sink */
if (ev->video_stream_nbr == 0)
{
GstElement *vis = NULL;
Emotion_Video_Stream *vstream;
Emotion_Audio_Stream *astream;
gint flags;
const char *vis_name;
if (!(vis_name = emotion_visualization_element_name_get(ev->vis)))
{
printf ("pb vis name %d\n", ev->vis);
goto finalize;
}
astream = (Emotion_Audio_Stream *)eina_list_data_get(ev->audio_streams);
vis = gst_element_factory_make(vis_name, "vissink");
vstream = emotion_video_stream_new(ev);
if (!vstream)
{
DBG("could not create visualization stream");
goto finalize;
}
vstream->length_time = astream->length_time;
vstream->width = 320;
vstream->height = 200;
vstream->fps_num = 25;
vstream->fps_den = 1;
vstream->fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
g_object_set(G_OBJECT(ev->pipeline), "vis-plugin", vis, NULL);
g_object_get(G_OBJECT(ev->pipeline), "flags", &flags, NULL);
flags |= 0x00000008; /* GST_PLAY_FLAG_VIS */
g_object_set(G_OBJECT(ev->pipeline), "flags", flags, NULL);
}
finalize:
ev->video_stream_nbr = eina_list_count(ev->video_streams);
ev->audio_stream_nbr = eina_list_count(ev->audio_streams);
if (ev->video_stream_nbr == 1)
{
Emotion_Video_Stream *vstream;
vstream = (Emotion_Video_Stream *)eina_list_data_get(ev->video_streams);
ev->ratio = (double)vstream->width / (double)vstream->height;
}
return EINA_TRUE;
unref_pipeline:
gst_object_unref(ev->pipeline);
return EINA_FALSE;
}
/* Send the video frame to the evas object */
void
cb_handoff(GstElement *fakesrc __UNUSED__,
@ -54,9 +340,9 @@ cb_handoff(GstElement *fakesrc __UNUSED__,
}
else
{
Emotion_Audio_Sink *asink;
asink = (Emotion_Audio_Sink *)eina_list_nth(ev->audio_sinks, ev->audio_sink_nbr);
_emotion_video_pos_update(ev->obj, ev->position, asink->length_time);
Emotion_Audio_Stream *astream;
astream = (Emotion_Audio_Stream *)eina_list_nth(ev->audio_streams, ev->audio_stream_nbr - 1);
_emotion_video_pos_update(ev->obj, ev->position, astream->length_time);
}
query = gst_query_new_position(GST_FORMAT_TIME);
@ -70,134 +356,32 @@ cb_handoff(GstElement *fakesrc __UNUSED__,
gst_query_unref(query);
}
void
file_new_decoded_pad_cb(GstElement *decodebin __UNUSED__,
GstPad *new_pad,
gboolean last __UNUSED__,
gpointer user_data)
Emotion_Video_Stream *
emotion_video_stream_new(Emotion_Gstreamer_Video *ev)
{
Emotion_Gstreamer_Video *ev;
GstCaps *caps;
gchar *str;
unsigned int index;
ev = (Emotion_Gstreamer_Video *)user_data;
caps = gst_pad_get_caps(new_pad);
str = gst_caps_to_string(caps);
/* video stream */
if (g_str_has_prefix(str, "video/"))
{
Emotion_Video_Sink *vsink;
GstElement *queue;
GstPad *videopad;
vsink = (Emotion_Video_Sink *)calloc(1, sizeof(Emotion_Video_Sink));
if (!vsink) return;
ev->video_sinks = eina_list_append(ev->video_sinks, vsink);
if (eina_error_get())
{
free(vsink);
return;
}
queue = gst_element_factory_make("queue", NULL);
vsink->sink = gst_element_factory_make("fakesink", "videosink");
gst_bin_add_many(GST_BIN(ev->pipeline), queue, vsink->sink, NULL);
gst_element_link(queue, vsink->sink);
videopad = gst_element_get_pad(queue, "sink");
gst_pad_link(new_pad, videopad);
gst_object_unref(videopad);
if (eina_list_count(ev->video_sinks) == 1)
{
ev->ratio = (double)vsink->width / (double)vsink->height;
}
gst_element_set_state(queue, GST_STATE_PAUSED);
gst_element_set_state(vsink->sink, GST_STATE_PAUSED);
}
/* audio stream */
else if (g_str_has_prefix(str, "audio/"))
{
Emotion_Audio_Sink *asink;
GstPad *audiopad;
asink = (Emotion_Audio_Sink *)calloc(1, sizeof(Emotion_Audio_Sink));
if (!asink) return;
ev->audio_sinks = eina_list_append(ev->audio_sinks, asink);
if (eina_error_get())
{
free(asink);
return;
}
index = eina_list_count(ev->audio_sinks);
asink->sink = emotion_audio_sink_create(ev, index);
gst_bin_add(GST_BIN(ev->pipeline), asink->sink);
audiopad = gst_element_get_pad(asink->sink, "sink");
gst_pad_link(new_pad, audiopad);
gst_element_set_state(asink->sink, GST_STATE_PAUSED);
}
free(str);
}
Emotion_Video_Sink *
emotion_video_sink_new(Emotion_Gstreamer_Video *ev)
{
Emotion_Video_Sink *vsink;
Emotion_Video_Stream *vstream;
if (!ev) return NULL;
vsink = (Emotion_Video_Sink *)calloc(1, sizeof(Emotion_Video_Sink));
if (!vsink) return NULL;
vstream = (Emotion_Video_Stream *)calloc(1, sizeof(Emotion_Video_Stream));
if (!vstream) return NULL;
ev->video_sinks = eina_list_append(ev->video_sinks, vsink);
ev->video_streams = eina_list_append(ev->video_streams, vstream);
if (eina_error_get())
{
free(vsink);
free(vstream);
return NULL;
}
return vsink;
return vstream;
}
void
emotion_video_sink_free(Emotion_Gstreamer_Video *ev, Emotion_Video_Sink *vsink)
emotion_video_stream_free(Emotion_Gstreamer_Video *ev, Emotion_Video_Stream *vstream)
{
if (!ev || !vsink) return;
if (!ev || !vstream) return;
ev->video_sinks = eina_list_remove(ev->video_sinks, vsink);
free(vsink);
}
Emotion_Video_Sink *
emotion_visualization_sink_create(Emotion_Gstreamer_Video *ev, Emotion_Audio_Sink *asink)
{
Emotion_Video_Sink *vsink;
if (!ev) return NULL;
vsink = emotion_video_sink_new(ev);
if (!vsink) return NULL;
vsink->sink = gst_bin_get_by_name(GST_BIN(asink->sink), "vissink1");
if (!vsink->sink)
{
emotion_video_sink_free(ev, vsink);
return NULL;
}
vsink->width = 320;
vsink->height = 200;
ev->ratio = (double)vsink->width / (double)vsink->height;
vsink->fps_num = 25;
vsink->fps_den = 1;
vsink->fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
vsink->length_time = asink->length_time;
g_object_set(G_OBJECT(vsink->sink), "sync", TRUE, NULL);
g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
g_signal_connect(G_OBJECT(vsink->sink),
"handoff",
G_CALLBACK(cb_handoff), ev);
return vsink;
ev->video_streams = eina_list_remove(ev->video_streams, vstream);
free(vstream);
}
int
@ -295,327 +479,3 @@ emotion_visualization_element_name_get(Emotion_Vis visualisation)
return "goom";
}
}
static GstElement *
emotion_visualization_bin_create(Emotion_Gstreamer_Video *ev, int index)
{
const char *vis_name;
char buf[64];
GstElement *vis, *visbin, *queue, *conv, *cspace, *sink;
GstPad *vispad;
GstCaps *caps;
if (ev->vis == EMOTION_VIS_NONE)
return NULL;
vis_name = emotion_visualization_element_name_get(ev->vis);
if (!vis_name)
return NULL;
g_snprintf(buf, sizeof(buf), "vis%d", index);
vis = gst_element_factory_make(vis_name, buf);
if (!vis)
return NULL;
g_snprintf(buf, sizeof(buf), "visbin%d", index);
visbin = gst_bin_new(buf);
queue = gst_element_factory_make("queue", NULL);
conv = gst_element_factory_make("audioconvert", NULL);
cspace = gst_element_factory_make("ffmpegcolorspace", NULL);
g_snprintf(buf, sizeof(buf), "vissink%d", index);
sink = gst_element_factory_make("fakesink", buf);
if ((!visbin) || (!queue) || (!conv) || (!cspace) || (!sink))
goto error;
gst_bin_add_many(GST_BIN(visbin), queue, conv, vis, cspace, sink, NULL);
gst_element_link_many(queue, conv, vis, cspace, NULL);
caps = gst_caps_new_simple("video/x-raw-rgb",
"bpp", G_TYPE_INT, 32,
"width", G_TYPE_INT, 320,
"height", G_TYPE_INT, 200,
NULL);
gst_element_link_filtered(cspace, sink, caps);
vispad = gst_element_get_pad(queue, "sink");
gst_element_add_pad(visbin, gst_ghost_pad_new("sink", vispad));
gst_object_unref(vispad);
return visbin;
error:
if (vis)
gst_object_unref(vis);
if (visbin)
gst_object_unref(visbin);
if (queue)
gst_object_unref(queue);
if (conv)
gst_object_unref(conv);
if (cspace)
gst_object_unref(cspace);
if (sink)
gst_object_unref(sink);
return NULL;
}
static GstElement *
emotion_audio_bin_create(Emotion_Gstreamer_Video *ev, int index)
{
GstElement *audiobin, *queue, *conv, *resample, *volume, *sink;
GstPad *audiopad;
double vol;
audiobin = gst_bin_new(NULL);
queue = gst_element_factory_make("queue", NULL);
conv = gst_element_factory_make("audioconvert", NULL);
resample = gst_element_factory_make("audioresample", NULL);
volume = gst_element_factory_make("volume", "volume");
if (index == 1)
sink = gst_element_factory_make("autoaudiosink", NULL);
else
/* XXX hack: use a proper mixer element here */
sink = gst_element_factory_make("fakesink", NULL);
if ((!audiobin) || (!queue) || (!conv) || (!resample) || (!volume) || (!sink))
goto error;
g_object_get(volume, "volume", &vol, NULL);
ev->volume = vol;
gst_bin_add_many(GST_BIN(audiobin),
queue, conv, resample, volume, sink, NULL);
gst_element_link_many(queue, conv, resample, volume, sink, NULL);
audiopad = gst_element_get_pad(queue, "sink");
gst_element_add_pad(audiobin, gst_ghost_pad_new("sink", audiopad));
gst_object_unref(audiopad);
return audiobin;
error:
if (audiobin)
gst_object_unref(audiobin);
if (queue)
gst_object_unref(queue);
if (conv)
gst_object_unref(conv);
if (resample)
gst_object_unref(resample);
if (volume)
gst_object_unref(volume);
if (sink)
gst_object_unref(sink);
return NULL;
}
GstElement *
emotion_audio_sink_create(Emotion_Gstreamer_Video *ev, int index)
{
gchar buf[128];
GstElement *bin;
GstElement *audiobin;
GstElement *visbin = NULL;
GstElement *tee;
GstPad *teepad;
GstPad *binpad;
audiobin = emotion_audio_bin_create(ev, index);
if (!audiobin)
return NULL;
bin = gst_bin_new(NULL);
if (!bin)
{
gst_object_unref(audiobin);
return NULL;
}
g_snprintf(buf, 128, "tee%d", index);
tee = gst_element_factory_make("tee", buf);
visbin = emotion_visualization_bin_create(ev, index);
gst_bin_add_many(GST_BIN(bin), tee, audiobin, visbin, NULL);
binpad = gst_element_get_pad(audiobin, "sink");
teepad = gst_element_get_request_pad(tee, "src%d");
gst_pad_link(teepad, binpad);
gst_object_unref(teepad);
gst_object_unref(binpad);
if (visbin)
{
binpad = gst_element_get_pad(visbin, "sink");
teepad = gst_element_get_request_pad(tee, "src%d");
gst_pad_link(teepad, binpad);
gst_object_unref(teepad);
gst_object_unref(binpad);
}
teepad = gst_element_get_pad(tee, "sink");
gst_element_add_pad(bin, gst_ghost_pad_new("sink", teepad));
gst_object_unref(teepad);
return bin;
}
void
emotion_streams_sinks_get(Emotion_Gstreamer_Video *ev, GstElement *decoder)
{
GstIterator *it;
Eina_List *alist;
Eina_List *vlist;
gpointer data;
alist = ev->audio_sinks;
vlist = ev->video_sinks;
it = gst_element_iterate_src_pads(decoder);
while (gst_iterator_next(it, &data) == GST_ITERATOR_OK)
{
GstPad *pad;
GstCaps *caps;
gchar *str;
pad = GST_PAD(data);
caps = gst_pad_get_caps(pad);
str = gst_caps_to_string(caps);
DBG("caps %s", str);
/* video stream */
if (g_str_has_prefix(str, "video/"))
{
Emotion_Video_Sink *vsink;
vsink = (Emotion_Video_Sink *)eina_list_data_get(vlist);
vlist = eina_list_next(vlist);
emotion_video_sink_fill(vsink, pad, caps);
ev->ratio = (double)vsink->width / (double)vsink->height;
}
/* audio stream */
else if (g_str_has_prefix(str, "audio/"))
{
Emotion_Audio_Sink *asink;
unsigned int index;
asink = (Emotion_Audio_Sink *)eina_list_data_get(alist);
alist = eina_list_next(alist);
emotion_audio_sink_fill(asink, pad, caps);
for (index = 0; asink != eina_list_nth(ev->audio_sinks, index) ; index++)
;
if (eina_list_count(ev->video_sinks) == 0)
{
if (index == 1)
{
Emotion_Video_Sink *vsink;
vsink = emotion_visualization_sink_create(ev, asink);
if (!vsink) goto finalize;
}
}
else
{
gchar buf[128];
GstElement *visbin;
g_snprintf(buf, 128, "visbin%d", index);
visbin = gst_bin_get_by_name(GST_BIN(ev->pipeline), buf);
if (visbin)
{
GstPad *srcpad;
GstPad *sinkpad;
sinkpad = gst_element_get_pad(visbin, "sink");
srcpad = gst_pad_get_peer(sinkpad);
gst_pad_unlink(srcpad, sinkpad);
gst_object_unref(srcpad);
gst_object_unref(sinkpad);
}
}
}
finalize:
gst_caps_unref(caps);
g_free(str);
gst_object_unref(pad);
}
gst_iterator_free(it);
}
void
emotion_video_sink_fill(Emotion_Video_Sink *vsink, GstPad *pad, GstCaps *caps)
{
GstStructure *structure;
GstQuery *query;
const GValue *val;
gchar *str;
structure = gst_caps_get_structure(caps, 0);
str = gst_caps_to_string(caps);
gst_structure_get_int(structure, "width", &vsink->width);
gst_structure_get_int(structure, "height", &vsink->height);
vsink->fps_num = 1;
vsink->fps_den = 1;
val = gst_structure_get_value(structure, "framerate");
if (val)
{
vsink->fps_num = gst_value_get_fraction_numerator(val);
vsink->fps_den = gst_value_get_fraction_denominator(val);
}
if (g_str_has_prefix(str, "video/x-raw-yuv"))
{
val = gst_structure_get_value(structure, "format");
vsink->fourcc = gst_value_get_fourcc(val);
}
else if (g_str_has_prefix(str, "video/x-raw-rgb"))
vsink->fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
else
vsink->fourcc = 0;
query = gst_query_new_duration(GST_FORMAT_TIME);
if (gst_pad_query(pad, query))
{
gint64 time;
gst_query_parse_duration(query, NULL, &time);
vsink->length_time = (double)time / (double)GST_SECOND;
}
g_free(str);
gst_query_unref(query);
}
void
emotion_audio_sink_fill(Emotion_Audio_Sink *asink, GstPad *pad, GstCaps *caps)
{
GstStructure *structure;
GstQuery *query;
structure = gst_caps_get_structure(caps, 0);
gst_structure_get_int(structure, "channels", &asink->channels);
gst_structure_get_int(structure, "rate", &asink->samplerate);
query = gst_query_new_duration(GST_FORMAT_TIME);
if (gst_pad_query(pad, query))
{
gint64 time;
gst_query_parse_duration(query, NULL, &time);
asink->length_time = (double)time / (double)GST_SECOND;
}
gst_query_unref(query);
}

View File

@ -7,34 +7,18 @@
gboolean emotion_pipeline_pause (GstElement *pipeline);
int emotion_pipeline_cdda_build (void *video, const char * device, unsigned int track);
int emotion_pipeline_file_build (void *video, const char *file);
int emotion_pipeline_uri_build (void *video, const char *uri);
int emotion_pipeline_dvd_build (void *video, const char *device);
int emotion_pipeline_v4l_build (void *video, const char *device);
Eina_Bool _emotion_pipeline_build(Emotion_Gstreamer_Video *ev, const char *file);
int emotion_pipeline_cdda_track_count_get (void *video);
GstElement *emotion_audio_sink_create (Emotion_Gstreamer_Video *ev, int index);
Emotion_Video_Sink *emotion_video_sink_new (Emotion_Gstreamer_Video *ev);
void emotion_video_sink_free (Emotion_Gstreamer_Video *ev, Emotion_Video_Sink *vsink);
Emotion_Video_Sink *emotion_visualization_sink_create (Emotion_Gstreamer_Video *ev, Emotion_Audio_Sink *asink);
void emotion_streams_sinks_get (Emotion_Gstreamer_Video *ev, GstElement *decoder);
void emotion_video_sink_fill (Emotion_Video_Sink *vsink, GstPad *pad, GstCaps *caps);
void emotion_audio_sink_fill (Emotion_Audio_Sink *asink, GstPad *pad, GstCaps *caps);
GstElement *emotion_audio_stream_create (Emotion_Gstreamer_Video *ev, int index);
Emotion_Video_Stream *emotion_video_stream_new (Emotion_Gstreamer_Video *ev);
void emotion_video_stream_free (Emotion_Gstreamer_Video *ev, Emotion_Video_Stream *vstream);
void cb_handoff (GstElement *fakesrc,
GstBuffer *buffer,
GstPad *pad,
gpointer user_data);
void file_new_decoded_pad_cb (GstElement *decodebin,
GstPad *new_pad,
gboolean last,
gpointer user_data);
const char *emotion_visualization_element_name_get(Emotion_Vis visualisation);

View File

@ -1,123 +0,0 @@
#include "emotion_gstreamer.h"
#include "emotion_gstreamer_pipeline.h"
static Emotion_Audio_Sink *_emotion_audio_sink_new (Emotion_Gstreamer_Video *ev);
static void _emotion_audio_sink_free (Emotion_Gstreamer_Video *ev, Emotion_Audio_Sink *asink);
int
emotion_pipeline_cdda_build(void *video, const char * device, unsigned int track)
{
GstElement *cdiocddasrc;
Emotion_Video_Sink *vsink;
Emotion_Audio_Sink *asink;
Emotion_Gstreamer_Video *ev;
/* GstFormat format; */
/* gint64 tracks_count; */
ev = (Emotion_Gstreamer_Video *)video;
if (!ev) return 0;
cdiocddasrc = gst_element_factory_make("cdiocddasrc", "src");
if (!cdiocddasrc)
{
ERR("cdiocddasrc gstreamer element missing. Install it.");
goto failure_cdiocddasrc;
}
if (device)
g_object_set(G_OBJECT(cdiocddasrc), "device", device, NULL);
g_object_set(G_OBJECT(cdiocddasrc), "track", track, NULL);
asink = _emotion_audio_sink_new(ev);
if (!asink)
goto failure_emotion_sink;
asink->sink = emotion_audio_sink_create(ev, 1);
if (!asink->sink)
goto failure_gstreamer_sink;
gst_bin_add_many((GST_BIN(ev->pipeline)), cdiocddasrc, asink->sink, NULL);
if (!gst_element_link(cdiocddasrc, asink->sink))
goto failure_link;
vsink = emotion_visualization_sink_create(ev, asink);
if (!vsink) goto failure_link;
if (!emotion_pipeline_pause(ev->pipeline))
goto failure_gstreamer_pause;
{
GstQuery *query;
GstPad *pad;
GstCaps *caps;
GstStructure *structure;
/* should always be found */
pad = gst_element_get_pad(cdiocddasrc, "src");
caps = gst_pad_get_caps(pad);
structure = gst_caps_get_structure(GST_CAPS(caps), 0);
gst_structure_get_int(structure, "channels", &asink->channels);
gst_structure_get_int(structure, "rate", &asink->samplerate);
gst_caps_unref(caps);
query = gst_query_new_duration(GST_FORMAT_TIME);
if (gst_pad_query(pad, query))
{
gint64 time;
gst_query_parse_duration(query, NULL, &time);
asink->length_time = (double)time / (double)GST_SECOND;
vsink->length_time = asink->length_time;
}
gst_query_unref(query);
gst_object_unref(GST_OBJECT(pad));
}
return 1;
failure_gstreamer_pause:
emotion_video_sink_free(ev, vsink);
failure_link:
gst_bin_remove(GST_BIN(ev->pipeline), asink->sink);
failure_gstreamer_sink:
_emotion_audio_sink_free(ev, asink);
failure_emotion_sink:
gst_bin_remove(GST_BIN(ev->pipeline), cdiocddasrc);
failure_cdiocddasrc:
return 0;
}
static Emotion_Audio_Sink *
_emotion_audio_sink_new(Emotion_Gstreamer_Video *ev)
{
Emotion_Audio_Sink *asink;
if (!ev) return NULL;
asink = (Emotion_Audio_Sink *)malloc(sizeof(Emotion_Audio_Sink));
if (!asink) return NULL;
ev->audio_sinks = eina_list_append(ev->audio_sinks, asink);
if (eina_error_get())
{
free(asink);
return NULL;
}
return asink;
}
static void
_emotion_audio_sink_free(Emotion_Gstreamer_Video *ev, Emotion_Audio_Sink *asink)
{
if (!ev || !asink) return;
ev->audio_sinks = eina_list_remove(ev->audio_sinks, asink);
free(asink);
}

View File

@ -1,243 +0,0 @@
#include "emotion_gstreamer.h"
#include "emotion_gstreamer_pipeline.h"
static void dvd_pad_added_cb (GstElement *dvddemuxer,
GObject *new_pad,
gpointer user_data);
static void dvd_no_more_pads_cb (GstElement *dvddemuxer,
gpointer user_data);
static volatile int no_more_pads = 0;
int
emotion_pipeline_dvd_build(void *video, const char *device)
{
GstElement *dvdreadsrc;
GstElement *dvddemux;
Emotion_Gstreamer_Video *ev;
Eina_List *alist;
Eina_List *vlist;
ev = (Emotion_Gstreamer_Video *)video;
if (!ev) return 0;
dvdreadsrc = gst_element_factory_make("dvdreadsrc", "src");
if (!dvdreadsrc)
goto failure_dvdreadsrc;
if (device)
g_object_set(G_OBJECT(dvdreadsrc), "device", device, NULL);
dvddemux = gst_element_factory_make("dvddemux", "dvddemux");
if (!dvddemux)
goto failure_dvddemux;
g_signal_connect(dvddemux, "pad-added",
G_CALLBACK(dvd_pad_added_cb), ev);
g_signal_connect(dvddemux, "no-more-pads",
G_CALLBACK(dvd_no_more_pads_cb), ev);
gst_bin_add_many(GST_BIN(ev->pipeline), dvdreadsrc, dvddemux, NULL);
if (!gst_element_link(dvdreadsrc, dvddemux))
goto failure_link;
if (!emotion_pipeline_pause(ev->pipeline))
goto failure_gstreamer_pause;
while (no_more_pads == 0)
{
DBG("toto");
}
no_more_pads = 0;
/* We get the informations of streams */
alist = ev->audio_sinks;
vlist = ev->video_sinks;
{
GstIterator *it;
gpointer data;
it = gst_element_iterate_src_pads(dvddemux);
while (gst_iterator_next(it, &data) == GST_ITERATOR_OK)
{
GstPad *pad;
GstCaps *caps;
gchar *str;
pad = GST_PAD(data);
caps = gst_pad_get_caps(pad);
str = gst_caps_to_string(caps);
DBG("caps %s", str);
/* video stream */
if (g_str_has_prefix(str, "video/mpeg"))
{
Emotion_Video_Sink *vsink;
GstPad *sink_pad;
GstCaps *sink_caps;
vsink = (Emotion_Video_Sink *)eina_list_data_get(vlist);
vlist = eina_list_next(vlist);
sink_pad = gst_element_get_pad(gst_bin_get_by_name(GST_BIN(ev->pipeline), "mpeg2dec"), "src");
sink_caps = gst_pad_get_caps(sink_pad);
str = gst_caps_to_string(sink_caps);
DBG("caps video %s", str);
emotion_video_sink_fill(vsink, sink_pad, sink_caps);
gst_caps_unref(sink_caps);
gst_object_unref(sink_pad);
}
/* audio stream */
else if (g_str_has_prefix(str, "audio/"))
{
Emotion_Audio_Sink *asink;
GstPad *sink_pad;
GstCaps *sink_caps;
asink = (Emotion_Audio_Sink *)eina_list_data_get(alist);
alist = eina_list_next(alist);
sink_pad = gst_element_get_pad(gst_bin_get_by_name(GST_BIN(ev->pipeline), "a52dec"), "src");
sink_caps = gst_pad_get_caps(sink_pad);
emotion_audio_sink_fill(asink, sink_pad, sink_caps);
}
gst_caps_unref(caps);
g_free(str);
gst_object_unref(pad);
}
gst_iterator_free(it);
}
/* The first vsink is a valid Emotion_Video_Sink * */
/* If no video stream is found, it's a visualisation sink */
{
Emotion_Video_Sink *vsink;
vsink = (Emotion_Video_Sink *)eina_list_data_get(ev->video_sinks);
if (vsink && vsink->sink)
{
g_object_set(G_OBJECT(vsink->sink), "sync", TRUE, NULL);
g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
g_signal_connect(G_OBJECT(vsink->sink),
"handoff",
G_CALLBACK(cb_handoff), ev);
}
}
return 1;
failure_gstreamer_pause:
failure_link:
gst_element_set_state(ev->pipeline, GST_STATE_NULL);
gst_bin_remove(GST_BIN(ev->pipeline), dvddemux);
failure_dvddemux:
gst_bin_remove(GST_BIN(ev->pipeline), dvdreadsrc);
failure_dvdreadsrc:
return 0;
}
static void
dvd_pad_added_cb(GstElement *dvddemuxer __UNUSED__,
GObject *new_pad,
gpointer user_data)
{
Emotion_Gstreamer_Video *ev;
GstCaps *caps;
gchar *str;
ev = (Emotion_Gstreamer_Video *)user_data;
caps = gst_pad_get_caps(GST_PAD(new_pad));
str = gst_caps_to_string(caps);
/* video stream */
if (g_str_has_prefix(str, "video/mpeg"))
{
Emotion_Video_Sink *vsink;
GstElement *queue;
GstElement *decoder;
GstPad *videopad;
vsink = (Emotion_Video_Sink *)malloc(sizeof(Emotion_Video_Sink));
if (!vsink) return;
ev->video_sinks = eina_list_append(ev->video_sinks, vsink);
if (eina_error_get())
{
free(vsink);
return;
}
queue = gst_element_factory_make("queue", NULL);
decoder = gst_element_factory_make("mpeg2dec", "mpeg2dec");
vsink->sink = gst_element_factory_make("fakesink", "videosink");
gst_bin_add_many(GST_BIN(ev->pipeline), queue, decoder, vsink->sink, NULL);
gst_element_link(queue, decoder);
gst_element_link(decoder, vsink->sink);
videopad = gst_element_get_pad(queue, "sink");
gst_pad_link(GST_PAD(new_pad), videopad);
gst_object_unref(videopad);
if (eina_list_count(ev->video_sinks) == 1)
{
ev->ratio = (double)vsink->width / (double)vsink->height;
}
gst_element_set_state(queue, GST_STATE_PAUSED);
gst_element_set_state(decoder, GST_STATE_PAUSED);
gst_element_set_state(vsink->sink, GST_STATE_PAUSED);
}
/* audio stream */
else if (g_str_has_prefix(str, "audio/"))
{
Emotion_Audio_Sink *asink;
GstElement *queue;
GstElement *decoder;
GstElement *conv;
GstElement *resample;
GstElement *volume;
GstPad *audiopad;
double vol;
asink = (Emotion_Audio_Sink *)malloc(sizeof(Emotion_Audio_Sink));
if (!asink) return;
ev->audio_sinks = eina_list_append(ev->audio_sinks, asink);
if (eina_error_get())
{
free(asink);
return;
}
queue = gst_element_factory_make("queue", NULL);
decoder = gst_element_factory_make("a52dec", "a52dec");
conv = gst_element_factory_make("audioconvert", NULL);
resample = gst_element_factory_make("audioresample", NULL);
volume = gst_element_factory_make("volume", "volume");
g_object_get(G_OBJECT(volume), "volume", &vol, NULL);
ev->volume = vol / 10.0;
/* FIXME: must manage several audio streams */
asink->sink = gst_element_factory_make("fakesink", NULL);
gst_bin_add_many(GST_BIN(ev->pipeline),
queue, decoder, conv, resample, volume, asink->sink, NULL);
gst_element_link_many(queue, decoder, conv, resample, volume, asink->sink, NULL);
audiopad = gst_element_get_pad(queue, "sink");
gst_pad_link(GST_PAD(new_pad), audiopad);
gst_object_unref(audiopad);
gst_element_set_state(queue, GST_STATE_PAUSED);
gst_element_set_state(decoder, GST_STATE_PAUSED);
gst_element_set_state(conv, GST_STATE_PAUSED);
gst_element_set_state(resample, GST_STATE_PAUSED);
gst_element_set_state(volume, GST_STATE_PAUSED);
gst_element_set_state(asink->sink, GST_STATE_PAUSED);
}
}
static void
dvd_no_more_pads_cb(GstElement *dvddemuxer __UNUSED__,
gpointer user_data __UNUSED__)
{
no_more_pads = 1;
}

View File

@ -1,61 +0,0 @@
#include "emotion_gstreamer.h"
#include "emotion_gstreamer_pipeline.h"
int
emotion_pipeline_file_build(void *video, const char *file)
{
GstElement *filesrc;
GstElement *decodebin;
Emotion_Gstreamer_Video *ev;
ev = (Emotion_Gstreamer_Video *)video;
if (!ev) return 0;
filesrc = gst_element_factory_make("filesrc", "src");
if (!filesrc)
goto failure_filesrc;
g_object_set(G_OBJECT(filesrc), "location", file, NULL);
decodebin = gst_element_factory_make("decodebin", "decodebin");
if (!decodebin)
goto failure_decodebin;
g_signal_connect(decodebin, "new-decoded-pad",
G_CALLBACK(file_new_decoded_pad_cb), ev);
gst_bin_add_many(GST_BIN(ev->pipeline), filesrc, decodebin, NULL);
if (!gst_element_link(filesrc, decodebin))
goto failure_link;
if (!emotion_pipeline_pause(ev->pipeline))
goto failure_gstreamer_pause;
emotion_streams_sinks_get(ev, decodebin);
/* The first vsink is a valid Emotion_Video_Sink * */
/* If no video stream is found, it's a visualisation sink */
{
Emotion_Video_Sink *vsink;
vsink = (Emotion_Video_Sink *)eina_list_data_get(ev->video_sinks);
if (vsink && vsink->sink)
{
g_object_set(G_OBJECT(vsink->sink), "sync", TRUE, NULL);
g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
g_signal_connect(G_OBJECT(vsink->sink),
"handoff",
G_CALLBACK(cb_handoff), ev);
}
}
return 1;
failure_gstreamer_pause:
failure_link:
gst_element_set_state(ev->pipeline, GST_STATE_NULL);
gst_bin_remove(GST_BIN(ev->pipeline), decodebin);
failure_decodebin:
gst_bin_remove(GST_BIN(ev->pipeline), filesrc);
failure_filesrc:
return 0;
}

View File

@ -1,63 +0,0 @@
#include "emotion_gstreamer.h"
#include "emotion_gstreamer_pipeline.h"
int
emotion_pipeline_uri_build(void *video, const char *uri)
{
GstElement *src;
GstElement *decodebin;
Emotion_Gstreamer_Video *ev;
ev = (Emotion_Gstreamer_Video *)video;
if (!ev) return 0;
if (gst_uri_protocol_is_supported(GST_URI_SRC, uri))
goto failure_src;
src = gst_element_make_from_uri(GST_URI_SRC, uri, "src");
if (!src)
goto failure_src;
g_object_set(G_OBJECT(src), "location", uri, NULL);
decodebin = gst_element_factory_make("decodebin", "decodebin");
if (!decodebin)
goto failure_decodebin;
g_signal_connect(decodebin, "new-decoded-pad",
G_CALLBACK(file_new_decoded_pad_cb), ev);
gst_bin_add_many(GST_BIN(ev->pipeline), src, decodebin, NULL);
if (!gst_element_link(src, decodebin))
goto failure_link;
if (!emotion_pipeline_pause(ev->pipeline))
goto failure_gstreamer_pause;
emotion_streams_sinks_get(ev, decodebin);
/* The first vsink is a valid Emotion_Video_Sink * */
/* If no video stream is found, it's a visualisation sink */
{
Emotion_Video_Sink *vsink;
vsink = (Emotion_Video_Sink *)eina_list_data_get(ev->video_sinks);
if (vsink && vsink->sink)
{
g_object_set(G_OBJECT(vsink->sink), "sync", TRUE, NULL);
g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
g_signal_connect(G_OBJECT(vsink->sink),
"handoff",
G_CALLBACK(cb_handoff), ev);
}
}
return 1;
failure_gstreamer_pause:
failure_link:
gst_element_set_state(ev->pipeline, GST_STATE_NULL);
gst_bin_remove(GST_BIN(ev->pipeline), decodebin);
failure_decodebin:
gst_bin_remove(GST_BIN(ev->pipeline), src);
failure_src:
return 0;
}

View File

@ -1,84 +0,0 @@
#include "emotion_gstreamer.h"
#include "emotion_gstreamer_pipeline.h"
int
emotion_pipeline_v4l_build(void *video, const char *device)
{
GstElement *v4l2src, *cspace, *queue, *sink;
Emotion_Video_Sink *vsink;
GstCaps *caps;
Emotion_Gstreamer_Video *ev;
char dev[128];
int devno;
ev = (Emotion_Gstreamer_Video *)video;
if (!ev) return 0;
v4l2src = gst_element_factory_make("v4l2src", "v4l2src");
cspace = gst_element_factory_make("ffmpegcolorspace", "cspace");
queue = gst_element_factory_make("queue", "queue");
sink = gst_element_factory_make("fakesink", "sink");
if ((!v4l2src) || (!cspace) || (!queue) || (!sink))
goto failure;
if (sscanf(device, "v4l://%d", &devno) != 1)
devno = 0;
snprintf(dev, sizeof(dev), "/dev/video%d", devno);
g_object_set (v4l2src, "device", dev, NULL);
gst_bin_add_many(GST_BIN(ev->pipeline), v4l2src, cspace, queue, sink, NULL);
caps = gst_caps_new_simple("video/x-raw-yuv",
"width", G_TYPE_INT, 320,
"height", G_TYPE_INT, 240,
NULL);
if (!gst_element_link_filtered(v4l2src, cspace, caps))
{
gst_caps_unref(caps);
goto failure;
}
gst_caps_unref(caps);
caps = gst_caps_new_simple("video/x-raw-rgb",
"bpp", G_TYPE_INT, 32,
"width", G_TYPE_INT, 320,
"height", G_TYPE_INT, 240,
NULL);
if (!gst_element_link_filtered(cspace, queue, caps))
{
gst_caps_unref(caps);
goto failure;
}
gst_caps_unref(caps);
gst_element_link(queue, sink);
vsink = emotion_video_sink_new(ev);
if(!vsink) goto failure;
vsink->sink = sink;
vsink->width=320;
vsink->height=240;
vsink->fourcc = GST_MAKE_FOURCC ('A', 'R', 'G', 'B');
g_object_set(G_OBJECT(vsink->sink), "sync", FALSE, NULL);
g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
g_signal_connect(G_OBJECT(vsink->sink),
"handoff",
G_CALLBACK(cb_handoff), ev);
return 1;
failure:
if(v4l2src)
gst_object_unref(v4l2src);
if(cspace)
gst_object_unref(cspace);
if(queue)
gst_object_unref(queue);
if(sink)
gst_object_unref(sink);
return 0;
}