emotion: make gstreamer backend async and handle it correctly everywhere.

SVN revision: 62010
This commit is contained in:
Cedric BAIL 2011-08-02 16:22:30 +00:00
parent 1cffa73758
commit 828b67374e
4 changed files with 413 additions and 324 deletions

View File

@ -82,6 +82,7 @@ struct _Smart_Data
double ratio;
double pos;
double remember_jump;
double seek_pos;
double len;
@ -89,7 +90,9 @@ struct _Smart_Data
Emotion_Suspend state;
Eina_Bool open : 1;
Eina_Bool play : 1;
Eina_Bool remember_play : 1;
Eina_Bool seek : 1;
Eina_Bool seeking : 1;
};
@ -329,8 +332,10 @@ emotion_object_init(Evas_Object *obj, const char *module_filename)
sd->spu.button = -1;
sd->ratio = 1.0;
sd->pos = 0;
sd->remember_jump = 0;
sd->seek_pos = 0;
sd->len = 0;
sd->remember_play = 0;
_emotion_module_close(sd->module, sd->video);
sd->module = NULL;
@ -370,6 +375,7 @@ emotion_object_file_set(Evas_Object *obj, const char *file)
sd->module->file_close(sd->video);
evas_object_image_data_set(sd->obj, NULL);
evas_object_image_size_set(sd->obj, 1, 1);
sd->open = 0;
if (!sd->module->file_open(sd->file, obj, sd->video))
return EINA_FALSE;
sd->module->size_get(sd->video, &w, &h);
@ -419,7 +425,13 @@ emotion_object_play_set(Evas_Object *obj, Eina_Bool play)
if (play == sd->play) return;
if (!sd->module) return;
if (!sd->video) return;
if (!sd->open)
{
sd->remember_play = play;
return;
}
sd->play = play;
sd->remember_play = play;
if (sd->state != EMOTION_WAKEUP) emotion_object_suspend_set(obj, EMOTION_WAKEUP);
if (sd->play) sd->module->play(sd->video, sd->pos);
else sd->module->stop(sd->video);
@ -445,6 +457,12 @@ emotion_object_position_set(Evas_Object *obj, double sec)
DBG("sec=%f", sec);
if (!sd->module) return;
if (!sd->video) return;
if (!sd->open)
{
sd->remember_jump = sec;
return ;
}
sd->remember_jump = 0;
sd->seek_pos = sec;
sd->seek = 1;
sd->pos = sd->seek_pos;
@ -1064,7 +1082,11 @@ emotion_object_last_position_load(Evas_Object *obj)
EINA_REFCOUNT_REF(sd);
sd->load_xattr = eio_file_xattr_get(tmp, "user.e.time_seek", _eio_load_xattr_done, _eio_load_xattr_error, sd);
sd->load_xattr = eio_file_xattr_get(tmp,
"user.e.time_seek",
_eio_load_xattr_done,
_eio_load_xattr_error,
sd);
#else
# ifdef HAVE_XATTR
{
@ -1266,6 +1288,15 @@ _emotion_decode_stop(Evas_Object *obj)
/**
 * Called by a backend once its asynchronous file open has completed.
 *
 * Marks the smart object as open, replays any play/seek request the user
 * made while the open was still in flight, and emits the "open,done"
 * smart callback so applications know the file is ready.
 *
 * @param obj The emotion smart object whose backend finished opening.
 */
EAPI void
_emotion_open_done(Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   /* From now on position_set/play_set act on the backend directly
      instead of being queued in remember_jump/remember_play. */
   sd->open = 1;
   /* Replay a seek requested before the open finished.
      NOTE(review): a remembered jump of exactly 0.0 is indistinguishable
      from "no pending seek", so a queued seek to position 0 is dropped
      here — confirm whether that matters to callers. */
   if (sd->remember_jump)
     emotion_object_position_set(obj, sd->remember_jump);
   /* Replay a play/pause toggle requested before the open finished. */
   if (sd->remember_play != sd->play)
     emotion_object_play_set(obj, sd->remember_play);
   evas_object_smart_callback_call(obj, SIG_OPEN_DONE, NULL);
}

View File

@ -355,6 +355,12 @@ em_shutdown(void *video)
if (!ev)
return 0;
if (ev->thread)
{
ecore_thread_cancel(ev->thread);
ev->thread = NULL;
}
if (ev->pipeline)
{
gst_element_set_state(ev->pipeline, GST_STATE_NULL);
@ -381,8 +387,6 @@ em_file_open(const char *file,
Emotion_Gstreamer_Video *ev;
Eina_Strbuf *sbuf = NULL;
const char *uri;
double start, end;
int i;
ev = (Emotion_Gstreamer_Video *)video;
@ -410,28 +414,20 @@ em_file_open(const char *file,
eina_strbuf_append(sbuf, file);
}
start = ecore_time_get();
uri = sbuf ? eina_strbuf_string_get(sbuf) : file;
DBG("setting file to '%s'", uri);
ev->pipeline = gstreamer_video_sink_new(ev, obj, uri);
if (sbuf) eina_strbuf_free(sbuf);
end = ecore_time_get();
DBG("Pipeline creation: %f", end - start);
if (!ev->pipeline)
return EINA_FALSE;
eina_threads_init();
start = ecore_time_get();
ev->eos_bus = gst_pipeline_get_bus(GST_PIPELINE(ev->pipeline));
if (!ev->eos_bus)
{
ERR("could not get the bus");
return EINA_FALSE;
}
end = ecore_time_get();
DBG("Get the bus: %f", end - start);
gst_bus_set_sync_handler(ev->eos_bus, _eos_sync_fct, ev);
@ -440,258 +436,6 @@ em_file_open(const char *file,
ev->position = 0.0;
g_object_get(G_OBJECT(ev->pipeline),
"n-audio", &ev->audio_stream_nbr,
"n-video", &ev->video_stream_nbr,
NULL);
if ((ev->video_stream_nbr == 0) && (ev->audio_stream_nbr == 0))
{
ERR("No audio nor video stream found");
gst_object_unref(ev->pipeline);
ev->pipeline = NULL;
return EINA_FALSE;
}
/* video stream */
start = ecore_time_get();
for (i = 0; i < ev->video_stream_nbr; i++)
{
Emotion_Video_Stream *vstream;
GstPad *pad = NULL;
GstCaps *caps;
GstStructure *structure;
GstQuery *query;
const GValue *val;
gchar *str;
gdouble length_time = 0.0;
gint width;
gint height;
gint fps_num;
gint fps_den;
guint32 fourcc = 0;
g_signal_emit_by_name(ev->pipeline, "get-video-pad", i, &pad);
if (!pad)
continue;
caps = gst_pad_get_negotiated_caps(pad);
if (!caps)
goto unref_pad_v;
structure = gst_caps_get_structure(caps, 0);
str = gst_caps_to_string(caps);
if (!gst_structure_get_int(structure, "width", &width))
goto unref_caps_v;
if (!gst_structure_get_int(structure, "height", &height))
goto unref_caps_v;
if (!gst_structure_get_fraction(structure, "framerate", &fps_num, &fps_den))
goto unref_caps_v;
if (g_str_has_prefix(str, "video/x-raw-yuv"))
{
val = gst_structure_get_value(structure, "format");
fourcc = gst_value_get_fourcc(val);
}
else if (g_str_has_prefix(str, "video/x-raw-rgb"))
fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
else
goto unref_caps_v;
query = gst_query_new_duration(GST_FORMAT_TIME);
if (gst_pad_peer_query(pad, query))
{
gint64 t;
gst_query_parse_duration(query, NULL, &t);
length_time = (double)t / (double)GST_SECOND;
}
else
goto unref_query_v;
vstream = emotion_video_stream_new(ev);
if (!vstream) goto unref_query_v;
vstream->length_time = length_time;
vstream->width = width;
vstream->height = height;
vstream->fps_num = fps_num;
vstream->fps_den = fps_den;
vstream->fourcc = fourcc;
vstream->index = i;
unref_query_v:
gst_query_unref(query);
unref_caps_v:
gst_caps_unref(caps);
unref_pad_v:
gst_object_unref(pad);
}
end = ecore_time_get();
DBG("Get video streams: %f", end - start);
/* Audio streams */
start = ecore_time_get();
for (i = 0; i < ev->audio_stream_nbr; i++)
{
Emotion_Audio_Stream *astream;
GstPad *pad;
GstCaps *caps;
GstStructure *structure;
GstQuery *query;
gdouble length_time = 0.0;
gint channels;
gint samplerate;
g_signal_emit_by_name(ev->pipeline, "get-audio-pad", i, &pad);
if (!pad)
continue;
caps = gst_pad_get_negotiated_caps(pad);
if (!caps)
goto unref_pad_a;
structure = gst_caps_get_structure(caps, 0);
if (!gst_structure_get_int(structure, "channels", &channels))
goto unref_caps_a;
if (!gst_structure_get_int(structure, "rate", &samplerate))
goto unref_caps_a;
query = gst_query_new_duration(GST_FORMAT_TIME);
if (gst_pad_peer_query(pad, query))
{
gint64 t;
gst_query_parse_duration(query, NULL, &t);
length_time = (double)t / (double)GST_SECOND;
}
else
goto unref_query_a;
astream = calloc(1, sizeof(Emotion_Audio_Stream));
if (!astream) continue;
ev->audio_streams = eina_list_append(ev->audio_streams, astream);
if (eina_error_get())
{
free(astream);
continue;
}
astream->length_time = length_time;
astream->channels = channels;
astream->samplerate = samplerate;
unref_query_a:
gst_query_unref(query);
unref_caps_a:
gst_caps_unref(caps);
unref_pad_a:
gst_object_unref(pad);
}
end = ecore_time_get();
DBG("Get audio streams: %f", end - start);
/* Visualization sink */
start = ecore_time_get();
if (ev->video_stream_nbr == 0)
{
GstElement *vis = NULL;
Emotion_Video_Stream *vstream;
Emotion_Audio_Stream *astream;
gint flags;
const char *vis_name;
if (!(vis_name = emotion_visualization_element_name_get(ev->vis)))
{
WRN("pb vis name %d", ev->vis);
goto finalize;
}
astream = eina_list_data_get(ev->audio_streams);
vis = gst_element_factory_make(vis_name, "vissink");
vstream = emotion_video_stream_new(ev);
if (!vstream)
goto finalize;
else
DBG("could not create visualization stream");
vstream->length_time = astream->length_time;
vstream->width = 320;
vstream->height = 200;
vstream->fps_num = 25;
vstream->fps_den = 1;
vstream->fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
g_object_set(G_OBJECT(ev->pipeline), "vis-plugin", vis, NULL);
g_object_get(G_OBJECT(ev->pipeline), "flags", &flags, NULL);
flags |= 0x00000008;
g_object_set(G_OBJECT(ev->pipeline), "flags", flags, NULL);
}
end = ecore_time_get();
DBG("Get visualization streams: %f", end - start);
finalize:
ev->video_stream_nbr = eina_list_count(ev->video_streams);
ev->audio_stream_nbr = eina_list_count(ev->audio_streams);
if (ev->video_stream_nbr == 1)
{
Emotion_Video_Stream *vstream;
vstream = eina_list_data_get(ev->video_streams);
ev->ratio = (double)vstream->width / (double)vstream->height;
_emotion_frame_resize(ev->obj, vstream->width, vstream->height, ev->ratio);
}
{
/* on recapitule : */
Emotion_Video_Stream *vstream;
Emotion_Audio_Stream *astream;
vstream = eina_list_data_get(ev->video_streams);
if (vstream)
{
DBG("video size=%dx%d, fps=%d/%d, "
"fourcc=%"GST_FOURCC_FORMAT", length=%"GST_TIME_FORMAT,
vstream->width, vstream->height, vstream->fps_num, vstream->fps_den,
GST_FOURCC_ARGS(vstream->fourcc),
GST_TIME_ARGS((guint64)(vstream->length_time * GST_SECOND)));
}
astream = eina_list_data_get(ev->audio_streams);
if (astream)
{
DBG("audio channels=%d, rate=%d, length=%"GST_TIME_FORMAT,
astream->channels, astream->samplerate,
GST_TIME_ARGS((guint64)(astream->length_time * GST_SECOND)));
}
}
if (ev->metadata)
_free_metadata(ev->metadata);
ev->metadata = calloc(1, sizeof(Emotion_Gstreamer_Metadata));
start = ecore_time_get();
em_audio_channel_volume_set(ev, ev->volume);
em_audio_channel_mute_set(ev, ev->audio_mute);
if (ev->play_started)
{
_emotion_playback_started(ev->obj);
ev->play_started = 0;
}
_emotion_open_done(ev->obj);
end = ecore_time_get();
DBG("Last stuff: %f", end - start);
return 1;
}
@ -712,6 +456,12 @@ em_file_close(void *video)
ev->eos_bus = NULL;
}
if (ev->thread)
{
ecore_thread_cancel(ev->thread);
ev->thread = NULL;
}
if (ev->pipeline)
{
gst_element_set_state(ev->pipeline, GST_STATE_NULL);
@ -719,13 +469,12 @@ em_file_close(void *video)
ev->pipeline = NULL;
}
eina_threads_shutdown();
/* we clear the stream lists */
EINA_LIST_FREE(ev->audio_streams, astream)
free(astream);
EINA_LIST_FREE(ev->video_streams, vstream)
free(vstream);
ev->pipeline_parsed = EINA_FALSE;
/* shutdown eos */
if (ev->metadata)
@ -772,17 +521,21 @@ em_size_get(void *video,
ev = (Emotion_Gstreamer_Video *)video;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
goto on_error;
vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
{
if (width) *width = vstream->width;
if (height) *height = vstream->height;
return ;
}
else
{
if (width) *width = 0;
if (height) *height = 0;
}
on_error:
if (width) *width = 0;
if (height) *height = 0;
}
static void
@ -833,7 +586,7 @@ em_len_get(void *video)
if (fmt != GST_FORMAT_TIME)
{
DBG("requrested duration in time, but got %s instead.",
gst_format_get_name(fmt));
gst_format_get_name(fmt));
goto fallback;
}
@ -843,6 +596,9 @@ em_len_get(void *video)
return val / 1000000000.0;
fallback:
if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
return 0.0;
EINA_LIST_FOREACH(ev->audio_streams, l, astream)
if (astream->length_time >= 0)
return astream->length_time;
@ -862,7 +618,10 @@ em_fps_num_get(void *video)
ev = (Emotion_Gstreamer_Video *)video;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
return 0;
vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
return vstream->fps_num;
@ -877,7 +636,10 @@ em_fps_den_get(void *video)
ev = (Emotion_Gstreamer_Video *)video;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
return 1;
vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
return vstream->fps_den;
@ -892,7 +654,10 @@ em_fps_get(void *video)
ev = (Emotion_Gstreamer_Video *)video;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
return 0.0;
vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
return (double)vstream->fps_num / (double)vstream->fps_den;
@ -986,6 +751,8 @@ em_video_handled(void *video)
ev = (Emotion_Gstreamer_Video *)video;
_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
if (!eina_list_count(ev->video_streams))
return 0;
@ -999,6 +766,8 @@ em_audio_handled(void *video)
ev = (Emotion_Gstreamer_Video *)video;
_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
if (!eina_list_count(ev->audio_streams))
return 0;
@ -1020,11 +789,14 @@ static Emotion_Format
em_format_get(void *video)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Stream *vstream;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
return EMOTION_FORMAT_NONE;
vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
{
switch (vstream->fourcc)
@ -1048,21 +820,25 @@ static void
em_video_data_size_get(void *video, int *w, int *h)
{
Emotion_Gstreamer_Video *ev;
Emotion_Video_Stream *vstream;
Emotion_Video_Stream *vstream;
ev = (Emotion_Gstreamer_Video *)video;
vstream = (Emotion_Video_Stream *)eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
goto on_error;
vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
if (vstream)
{
*w = vstream->width;
*h = vstream->height;
return ;
}
else
{
*w = 0;
*h = 0;
}
on_error:
*w = 0;
*h = 0;
}
static int
@ -1105,6 +881,8 @@ em_video_channel_count(void *video)
ev = (Emotion_Gstreamer_Video *)video;
_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
return eina_list_count(ev->video_streams);
}
@ -1129,6 +907,8 @@ em_video_channel_get(void *video)
ev = (Emotion_Gstreamer_Video *)video;
_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
return ev->video_stream_nbr;
}
@ -1169,6 +949,8 @@ em_audio_channel_count(void *video)
ev = (Emotion_Gstreamer_Video *)video;
_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
return eina_list_count(ev->audio_streams);
}
@ -1193,6 +975,8 @@ em_audio_channel_get(void *video)
ev = (Emotion_Gstreamer_Video *)video;
_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
return ev->audio_stream_nbr;
}
@ -1688,3 +1472,276 @@ _eos_sync_fct(GstBus *bus, GstMessage *msg, gpointer data)
return GST_BUS_DROP;
}
/**
 * Synchronously finish parsing the playbin2 pipeline: wait for the PAUSED
 * preroll, enumerate the negotiated audio/video streams, set up the
 * visualization sink for audio-only media, and fire the deferred
 * open/playback notifications.
 *
 * @param ev    The gstreamer backend instance (owns the pipeline).
 * @param force If EINA_TRUE, cancel the pending async pause thread and
 *              parse right now; if EINA_FALSE, bail out while the async
 *              thread is still running.
 * @return EINA_TRUE when the pipeline is (or already was) parsed,
 *         EINA_FALSE on failure or while parsing must still wait.
 */
Eina_Bool
_emotion_gstreamer_video_pipeline_parse(Emotion_Gstreamer_Video *ev,
                                        Eina_Bool force)
{
   /* FIX: was declared gboolean, but gst_element_get_state() returns a
      GstStateChangeReturn enum that is compared against enum values below. */
   GstStateChangeReturn res;
   int i;

   if (ev->pipeline_parsed)
     return EINA_TRUE;

   /* A forced parse preempts the asynchronous preroll thread. */
   if (force && ev->thread)
     {
        ecore_thread_cancel(ev->thread);
        ev->thread = NULL;
     }

   /* Non-forced callers must wait until the async preroll completes. */
   if (ev->thread)
     return EINA_FALSE;

   /* Block until the pipeline settles in its target (PAUSED) state. */
   res = gst_element_get_state(ev->pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
   if (!(res == GST_STATE_CHANGE_SUCCESS
         || res == GST_STATE_CHANGE_NO_PREROLL))
     {
        /** NOTE: you need to set: GST_DEBUG_DUMP_DOT_DIR=/tmp EMOTION_ENGINE=gstreamer to save the $EMOTION_GSTREAMER_DOT file in '/tmp' */
        /** then call dot -Tpng -oemotion_pipeline.png /tmp/$TIMESTAMP-$EMOTION_GSTREAMER_DOT.dot */
        if (getenv("EMOTION_GSTREAMER_DOT"))
          GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(ev->pipeline),
                                            GST_DEBUG_GRAPH_SHOW_ALL,
                                            getenv("EMOTION_GSTREAMER_DOT"));
        /* FIX: old message "Unable to get GST_CLOCK_TIME_NONE." made no
           sense — GST_CLOCK_TIME_NONE is just the infinite timeout. */
        ERR("Unable to reach the PAUSED state (state change result: %i).", res);
        return EINA_FALSE;
     }

   g_object_get(G_OBJECT(ev->pipeline),
                "n-audio", &ev->audio_stream_nbr,
                "n-video", &ev->video_stream_nbr,
                NULL);

   if ((ev->video_stream_nbr == 0) && (ev->audio_stream_nbr == 0))
     {
        ERR("No audio nor video stream found");
        return EINA_FALSE;
     }

   /* Video streams: query negotiated caps and duration of each pad. */
   for (i = 0; i < ev->video_stream_nbr; i++)
     {
        Emotion_Video_Stream *vstream;
        GstPad *pad = NULL;
        GstCaps *caps;
        GstStructure *structure;
        GstQuery *query;
        const GValue *val;
        gchar *str;
        gboolean is_yuv;
        gboolean is_rgb;
        gdouble length_time = 0.0;
        gint width;
        gint height;
        gint fps_num;
        gint fps_den;
        guint32 fourcc = 0;

        g_signal_emit_by_name(ev->pipeline, "get-video-pad", i, &pad);
        if (!pad)
          continue;

        caps = gst_pad_get_negotiated_caps(pad);
        if (!caps)
          goto unref_pad_v;
        structure = gst_caps_get_structure(caps, 0);

        /* FIX: gst_caps_to_string() returns an allocated string that the
           previous code leaked on every path; classify the media type up
           front and release the string immediately. */
        str = gst_caps_to_string(caps);
        is_yuv = g_str_has_prefix(str, "video/x-raw-yuv");
        is_rgb = g_str_has_prefix(str, "video/x-raw-rgb");
        g_free(str);

        if (!gst_structure_get_int(structure, "width", &width))
          goto unref_caps_v;
        if (!gst_structure_get_int(structure, "height", &height))
          goto unref_caps_v;
        if (!gst_structure_get_fraction(structure, "framerate", &fps_num, &fps_den))
          goto unref_caps_v;

        if (is_yuv)
          {
             val = gst_structure_get_value(structure, "format");
             fourcc = gst_value_get_fourcc(val);
          }
        else if (is_rgb)
          fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
        else
          goto unref_caps_v; /* unsupported raw format */

        query = gst_query_new_duration(GST_FORMAT_TIME);
        if (gst_pad_peer_query(pad, query))
          {
             gint64 t;

             gst_query_parse_duration(query, NULL, &t);
             length_time = (double)t / (double)GST_SECOND;
          }
        else
          goto unref_query_v;

        vstream = emotion_video_stream_new(ev);
        if (!vstream) goto unref_query_v;

        vstream->length_time = length_time;
        vstream->width = width;
        vstream->height = height;
        vstream->fps_num = fps_num;
        vstream->fps_den = fps_den;
        vstream->fourcc = fourcc;
        vstream->index = i;

     unref_query_v:
        gst_query_unref(query);
     unref_caps_v:
        gst_caps_unref(caps);
     unref_pad_v:
        gst_object_unref(pad);
     }

   /* Audio streams: same dance with channels/rate instead of geometry. */
   for (i = 0; i < ev->audio_stream_nbr; i++)
     {
        Emotion_Audio_Stream *astream;
        GstPad *pad;
        GstCaps *caps;
        GstStructure *structure;
        GstQuery *query;
        gdouble length_time = 0.0;
        gint channels;
        gint samplerate;

        g_signal_emit_by_name(ev->pipeline, "get-audio-pad", i, &pad);
        if (!pad)
          continue;

        caps = gst_pad_get_negotiated_caps(pad);
        if (!caps)
          goto unref_pad_a;
        structure = gst_caps_get_structure(caps, 0);

        if (!gst_structure_get_int(structure, "channels", &channels))
          goto unref_caps_a;
        if (!gst_structure_get_int(structure, "rate", &samplerate))
          goto unref_caps_a;

        query = gst_query_new_duration(GST_FORMAT_TIME);
        if (gst_pad_peer_query(pad, query))
          {
             gint64 t;

             gst_query_parse_duration(query, NULL, &t);
             length_time = (double)t / (double)GST_SECOND;
          }
        else
          goto unref_query_a;

        astream = calloc(1, sizeof(Emotion_Audio_Stream));
        if (!astream) continue;
        ev->audio_streams = eina_list_append(ev->audio_streams, astream);
        if (eina_error_get())
          {
             free(astream);
             continue;
          }

        astream->length_time = length_time;
        astream->channels = channels;
        astream->samplerate = samplerate;

     unref_query_a:
        gst_query_unref(query);
     unref_caps_a:
        gst_caps_unref(caps);
     unref_pad_a:
        gst_object_unref(pad);
     }

   /* Visualization sink: audio-only media gets a synthetic video stream
      rendered by the configured visualization plugin. */
   if (ev->video_stream_nbr == 0)
     {
        GstElement *vis = NULL;
        Emotion_Video_Stream *vstream;
        Emotion_Audio_Stream *astream;
        gint flags;
        const char *vis_name;

        if (!(vis_name = emotion_visualization_element_name_get(ev->vis)))
          {
             WRN("pb vis name %d", ev->vis);
             goto finalize;
          }

        /* FIX: astream could be NULL (e.g. every audio pad failed
           negotiation above) and was dereferenced unconditionally. */
        astream = eina_list_data_get(ev->audio_streams);
        if (!astream)
          {
             WRN("no audio stream available to drive the visualization");
             goto finalize;
          }

        vis = gst_element_factory_make(vis_name, "vissink");
        vstream = emotion_video_stream_new(ev);
        /* FIX: the failure message was on the success branch of this
           test (inverted if/else in the original code). */
        if (!vstream)
          {
             WRN("could not create visualization stream");
             goto finalize;
          }

        vstream->length_time = astream->length_time;
        vstream->width = 320;
        vstream->height = 200;
        vstream->fps_num = 25;
        vstream->fps_den = 1;
        vstream->fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');

        /* NOTE(review): vis may be NULL if the factory is missing;
           playbin2 treats a NULL vis-plugin as "no visualization". */
        g_object_set(G_OBJECT(ev->pipeline), "vis-plugin", vis, NULL);
        g_object_get(G_OBJECT(ev->pipeline), "flags", &flags, NULL);
        flags |= 0x00000008; /* GST_PLAY_FLAG_VIS */
        g_object_set(G_OBJECT(ev->pipeline), "flags", flags, NULL);
     }

 finalize:
   /* Recount from the lists: pads that failed negotiation are excluded. */
   ev->video_stream_nbr = eina_list_count(ev->video_streams);
   ev->audio_stream_nbr = eina_list_count(ev->audio_streams);

   if (ev->video_stream_nbr == 1)
     {
        Emotion_Video_Stream *vstream;

        vstream = eina_list_data_get(ev->video_streams);
        ev->ratio = (double)vstream->width / (double)vstream->height;
        _emotion_frame_resize(ev->obj, vstream->width, vstream->height, ev->ratio);
     }

   {
      /* Recap what was found, for debugging. */
      Emotion_Video_Stream *vstream;
      Emotion_Audio_Stream *astream;

      vstream = eina_list_data_get(ev->video_streams);
      if (vstream)
        {
           DBG("video size=%dx%d, fps=%d/%d, "
               "fourcc=%"GST_FOURCC_FORMAT", length=%"GST_TIME_FORMAT,
               vstream->width, vstream->height, vstream->fps_num, vstream->fps_den,
               GST_FOURCC_ARGS(vstream->fourcc),
               GST_TIME_ARGS((guint64)(vstream->length_time * GST_SECOND)));
        }

      astream = eina_list_data_get(ev->audio_streams);
      if (astream)
        {
           DBG("audio channels=%d, rate=%d, length=%"GST_TIME_FORMAT,
               astream->channels, astream->samplerate,
               GST_TIME_ARGS((guint64)(astream->length_time * GST_SECOND)));
        }
   }

   if (ev->metadata)
     _free_metadata(ev->metadata);
   /* NOTE(review): calloc() result is not checked; downstream readers of
      ev->metadata appear to tolerate NULL — confirm before hardening. */
   ev->metadata = calloc(1, sizeof(Emotion_Gstreamer_Metadata));

   em_audio_channel_volume_set(ev, ev->volume);
   em_audio_channel_mute_set(ev, ev->audio_mute);

   /* FIX: mark the pipeline parsed BEFORE firing user callbacks.
      _emotion_open_done() may re-enter backend getters that call this
      function again; with the flag still unset that re-entry would parse
      (and append) every stream a second time. */
   ev->pipeline_parsed = EINA_TRUE;

   if (ev->play_started)
     {
        _emotion_playback_started(ev->obj);
        ev->play_started = 0;
     }

   _emotion_open_done(ev->obj);

   return EINA_TRUE;
}

View File

@ -59,6 +59,7 @@ struct _Emotion_Gstreamer_Video
{
/* Gstreamer elements */
GstElement *pipeline;
Ecore_Thread *thread;
/* eos */
GstBus *eos_bus;
@ -85,10 +86,11 @@ struct _Emotion_Gstreamer_Video
Emotion_Vis vis;
unsigned char play : 1;
unsigned char play_started : 1;
unsigned char video_mute : 1;
unsigned char audio_mute : 1;
Eina_Bool play : 1;
Eina_Bool play_started : 1;
Eina_Bool video_mute : 1;
Eina_Bool audio_mute : 1;
Eina_Bool pipeline_parsed : 1;
};
struct _EvasVideoSink {
@ -188,5 +190,7 @@ void emotion_gstreamer_buffer_free(Emotion_Gstreamer_Buffer *send);
Emotion_Gstreamer_Message *emotion_gstreamer_message_alloc(Emotion_Gstreamer_Video *ev,
GstMessage *msg);
void emotion_gstreamer_message_free(Emotion_Gstreamer_Message *send);
Eina_Bool _emotion_gstreamer_video_pipeline_parse(Emotion_Gstreamer_Video *ev,
Eina_Bool force);
#endif /* __EMOTION_GSTREAMER_H__ */

View File

@ -341,6 +341,11 @@ static void evas_video_sink_main_render(void *data)
gst_data = GST_BUFFER_DATA(buffer);
if (!gst_data) goto exit_point;
ev = evas_object_data_get(priv->o, "_emotion_gstreamer_video");
if (!ev) goto exit_point;
_emotion_gstreamer_video_pipeline_parse(ev, EINA_TRUE);
// This prevent a race condition when data are still in the pipe
// but the buffer size as changed because of a request from
// emotion smart (like on a file set).
@ -348,9 +353,6 @@ static void evas_video_sink_main_render(void *data)
if (w != priv->width || h != priv->height)
goto exit_point;
ev = evas_object_data_get(priv->o, "_emotion_gstreamer_video");
if (!ev) goto exit_point;
evas_object_image_size_set(priv->o, priv->width, priv->height);
evas_object_image_alpha_set(priv->o, 0);
evas_object_image_colorspace_set(priv->o, priv->eformat);
@ -619,6 +621,29 @@ gstreamer_plugin_init (GstPlugin * plugin)
EVAS_TYPE_VIDEO_SINK);
}
/* Ecore_Thread worker body: block in the worker (not the mainloop) until
 * the pipeline has transitioned to PAUSED. */
static void
_emotion_gstreamer_pause(void *data, Ecore_Thread *thread __UNUSED__)
{
   Emotion_Gstreamer_Video *ev;

   ev = (Emotion_Gstreamer_Video *)data;
   gst_element_set_state(ev->pipeline, GST_STATE_PAUSED);
}
/* Ecore_Thread cancel callback: the preroll was aborted, so drop the
 * handle — the thread is gone and must not be cancelled again. */
static void
_emotion_gstreamer_cancel(void *data, Ecore_Thread *thread __UNUSED__)
{
   ((Emotion_Gstreamer_Video *)data)->thread = NULL;
}
/* Ecore_Thread end callback (mainloop side): the worker reached PAUSED,
 * so force a full pipeline parse now, then clear the thread handle via
 * the shared cancel path. Order matters: parse runs while ev->thread is
 * still set and takes the force branch. */
static void
_emotion_gstreamer_end(void *data, Ecore_Thread *thread)
{
   Emotion_Gstreamer_Video *ev = data;

   _emotion_gstreamer_video_pipeline_parse(ev, EINA_TRUE);
   _emotion_gstreamer_cancel(ev, thread);
}
GstElement *
gstreamer_video_sink_new(Emotion_Gstreamer_Video *ev,
Evas_Object *o,
@ -628,7 +653,6 @@ gstreamer_video_sink_new(Emotion_Gstreamer_Video *ev,
GstElement *sink;
Evas_Object *obj;
GstStateChangeReturn res;
double start, end;
obj = _emotion_image_get(o);
if (!obj)
@ -637,17 +661,13 @@ gstreamer_video_sink_new(Emotion_Gstreamer_Video *ev,
return NULL;
}
start = ecore_time_get();
playbin = gst_element_factory_make("playbin2", "playbin");
if (!playbin)
{
ERR("Unable to create 'playbin' GstElement.");
return NULL;
}
end = ecore_time_get();
DBG("Playbin2: %f", end - start);
start = ecore_time_get();
sink = gst_element_factory_make("emotion-sink", "sink");
if (!sink)
{
@ -659,34 +679,11 @@ gstreamer_video_sink_new(Emotion_Gstreamer_Video *ev,
g_object_set(G_OBJECT(playbin), "uri", uri, NULL);
g_object_set(G_OBJECT(sink), "evas-object", obj, NULL);
end = ecore_time_get();
DBG("emotion-sink: %f", end - start);
start = ecore_time_get();
/* res = gst_element_set_state(playbin, GST_STATE_PLAYING); */
res = gst_element_set_state(playbin, GST_STATE_PAUSED);
if (res == GST_STATE_CHANGE_FAILURE)
{
ERR("Unable to set GST_STATE_PAUSED.");
goto unref_pipeline;
}
end = ecore_time_get();
DBG("Pause pipeline: %f", end - start);
start = ecore_time_get();
res = gst_element_get_state(playbin, NULL, NULL, GST_CLOCK_TIME_NONE);
if (res != GST_STATE_CHANGE_SUCCESS)
{
/** NOTE: you need to set: GST_DEBUG_DUMP_DOT_DIR=/tmp EMOTION_ENGINE=gstreamer to save the $EMOTION_GSTREAMER_DOT file in '/tmp' */
/** then call dot -Tpng -oemotion_pipeline.png /tmp/$TIMESTAMP-$EMOTION_GSTREAMER_DOT.dot */
if (getenv("EMOTION_GSTREAMER_DOT")) GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(playbin), GST_DEBUG_GRAPH_SHOW_ALL, getenv("EMOTION_GSTREAMER_DOT"));
ERR("Unable to get GST_CLOCK_TIME_NONE.");
goto unref_pipeline;
}
end = ecore_time_get();
DBG("No time: %f", end - start);
ev->pipeline = playbin;
ev->thread = ecore_thread_run(_emotion_gstreamer_pause,
_emotion_gstreamer_end,
_emotion_gstreamer_cancel,
ev);
/** NOTE: you need to set: GST_DEBUG_DUMP_DOT_DIR=/tmp EMOTION_ENGINE=gstreamer to save the $EMOTION_GSTREAMER_DOT file in '/tmp' */
/** then call dot -Tpng -oemotion_pipeline.png /tmp/$TIMESTAMP-$EMOTION_GSTREAMER_DOT.dot */