forked from enlightenment/efl
* add two functions to know if audio and video are handled (emotion_object_audio_handled_get and emotion_object_video_handled_get)
* add a smart callback "audio_level_change" to deal with audio level changes
* alsa is now used by default, since many people report troubles with oss
SVN revision: 14320
This commit is contained in:
parent
93f67dbe89
commit
36a904a439
|
@ -61,6 +61,8 @@ void emotion_object_play_set (Evas_Object *obj, Evas_Bool p
|
|||
Evas_Bool emotion_object_play_get (Evas_Object *obj);
|
||||
void emotion_object_position_set (Evas_Object *obj, double sec);
|
||||
double emotion_object_position_get (Evas_Object *obj);
|
||||
Evas_Bool emotion_object_video_handled_get (Evas_Object *obj);
|
||||
Evas_Bool emotion_object_audio_handled_get (Evas_Object *obj);
|
||||
Evas_Bool emotion_object_seekable_get (Evas_Object *obj);
|
||||
double emotion_object_play_length_get (Evas_Object *obj);
|
||||
void emotion_object_size_get (Evas_Object *obj, int *iw, int *ih);
|
||||
|
|
|
@ -31,6 +31,8 @@ struct _Emotion_Video_Module
|
|||
double (*fps_get) (void *ef);
|
||||
double (*pos_get) (void *ef);
|
||||
double (*ratio_get) (void *ef);
|
||||
int (*video_handled) (void *ef);
|
||||
int (*audio_handled) (void *ef);
|
||||
int (*seekable) (void *ef);
|
||||
void (*frame_done) (void *ef);
|
||||
void (*yuv_size_get) (void *ef, int *w, int *h);
|
||||
|
@ -76,6 +78,7 @@ void _emotion_video_pos_update(Evas_Object *obj, double pos, double len);
|
|||
void _emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio);
|
||||
void _emotion_decode_stop(Evas_Object *obj);
|
||||
void _emotion_playback_finished(Evas_Object *obj);
|
||||
void _emotion_audio_level_change(Evas_Object *obj);
|
||||
void _emotion_channels_change(Evas_Object *obj);
|
||||
void _emotion_title_set(Evas_Object *obj, char *title);
|
||||
void _emotion_progress_set(Evas_Object *obj, char *info, double stat);
|
||||
|
|
|
@ -265,6 +265,28 @@ emotion_object_seekable_get(Evas_Object *obj)
|
|||
return sd->module->seekable(sd->video);
|
||||
}
|
||||
|
||||
Evas_Bool
|
||||
emotion_object_video_handled_get(Evas_Object *obj)
|
||||
{
|
||||
Smart_Data *sd;
|
||||
|
||||
E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
|
||||
if (!sd->module) return 0;
|
||||
if (!sd->video) return 0;
|
||||
return sd->module->video_handled(sd->video);
|
||||
}
|
||||
|
||||
Evas_Bool
|
||||
emotion_object_audio_handled_get(Evas_Object *obj)
|
||||
{
|
||||
Smart_Data *sd;
|
||||
|
||||
E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
|
||||
if (!sd->module) return 0;
|
||||
if (!sd->video) return 0;
|
||||
return sd->module->audio_handled(sd->video);
|
||||
}
|
||||
|
||||
double
|
||||
emotion_object_play_length_get(Evas_Object *obj)
|
||||
{
|
||||
|
@ -810,6 +832,11 @@ _emotion_playback_finished(Evas_Object *obj)
|
|||
evas_object_smart_callback_call(obj, "playback_finished", NULL);
|
||||
}
|
||||
|
||||
/**
 * Emit the "audio_level_change" smart callback on @p obj.
 *
 * Called by backend modules when the stream's audio level changes;
 * forwards the event to smart-callback listeners with no event payload.
 *
 * @param obj The emotion smart object the event belongs to.
 */
void
_emotion_audio_level_change(Evas_Object *obj)
{
   /* Return type on its own line, matching the style of every sibling
    * helper (e.g. _emotion_channels_change). */
   evas_object_smart_callback_call(obj, "audio_level_change", NULL);
}
|
||||
|
||||
void
|
||||
_emotion_channels_change(Evas_Object *obj)
|
||||
{
|
||||
|
|
|
@ -227,8 +227,8 @@ em_init(Evas_Object *obj, void **emotion_video)
|
|||
ev->fd = ev->fd_write;
|
||||
|
||||
ev->video = xine_open_video_driver(ev->decoder, "emotion", XINE_VISUAL_TYPE_NONE, ev);
|
||||
ev->audio = xine_open_audio_driver(ev->decoder, "oss", ev);
|
||||
// ev->audio = xine_open_audio_driver(ev->decoder, "alsa", ev);
|
||||
// ev->audio = xine_open_audio_driver(ev->decoder, "oss", ev);
|
||||
ev->audio = xine_open_audio_driver(ev->decoder, "alsa", ev);
|
||||
// ev->audio = xine_open_audio_driver(ev->decoder, "arts", ev);
|
||||
// ev->audio = xine_open_audio_driver(ev->decoder, "esd", ev);
|
||||
ev->stream = xine_stream_new(ev->decoder, ev->audio, ev->video);
|
||||
|
@ -496,6 +496,26 @@ em_ratio_get(void *ef)
|
|||
return ev->ratio;
|
||||
}
|
||||
|
||||
static int
|
||||
em_video_handled(void *ef)
|
||||
{
|
||||
Emotion_Xine_Video *ev;
|
||||
|
||||
ev = (Emotion_Xine_Video *)ef;
|
||||
return (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO) &&
|
||||
xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED));
|
||||
}
|
||||
|
||||
static int
|
||||
em_audio_handled(void *ef)
|
||||
{
|
||||
Emotion_Xine_Video *ev;
|
||||
|
||||
ev = (Emotion_Xine_Video *)ef;
|
||||
return (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_AUDIO) &&
|
||||
xine_get_stream_info(ev->stream, XINE_STREAM_INFO_AUDIO_HANDLED));
|
||||
}
|
||||
|
||||
static int
|
||||
em_seekable(void *ef)
|
||||
{
|
||||
|
@ -1167,6 +1187,7 @@ _em_fd_ev_active(void *data, Ecore_Fd_Handler *fdh)
|
|||
xine_audio_level_data_t *e;
|
||||
|
||||
e = (xine_audio_level_data_t *)eev->xine_event;
|
||||
_emotion_audio_level_change(ev->obj);
|
||||
printf("EV: Audio Level [FIXME: break this out to emotion api]\n");
|
||||
// e->left (0->100)
|
||||
// e->right
|
||||
|
@ -1309,6 +1330,8 @@ static Emotion_Video_Module em_module =
|
|||
em_fps_get, /* fps_get */
|
||||
em_pos_get, /* pos_get */
|
||||
em_ratio_get, /* ratio_get */
|
||||
em_video_handled, /* video_handled */
|
||||
em_audio_handled, /* audio_handled */
|
||||
em_seekable, /* seekable */
|
||||
em_frame_done, /* frame_done */
|
||||
em_yuv_size_get, /* yuv_size_get */
|
||||
|
|
Loading…
Reference in New Issue