remove vlc, gst-0.10, xine deps, modules as they are broken

They don't work. Easier to remove than to fix, so... remove :) Only gst
1.x is supported now.
Carsten Haitzler 2020-03-03 20:38:58 +00:00
parent 0dcbc26a5a
commit 075bab83c4
20 changed files with 14 additions and 7303 deletions

View File

@@ -13,12 +13,11 @@ if [ "$DISTRO" != "" ] ; then
# TODO:
# - No libelogind package in fedora 30 repo
# - RPM fusion repo for xine and libvlc
# - Ibus
ENABLED_LINUX_COPTS=" -Dfb=true -Dsdl=true -Dbuffer=true -Dbuild-id=travis-build \
-Ddebug-threads=true -Dglib=true -Dg-mainloop=true -Dxpresent=true -Dxinput22=true \
-Devas-loaders-disabler=json -Decore-imf-loaders-disabler= -Demotion-loaders-disabler=libvlc,xine \
-Demotion-generic-loaders-disabler=vlc -Dharfbuzz=true -Dpixman=true -Dhyphen=true \
-Devas-loaders-disabler=json -Decore-imf-loaders-disabler= \
-Dharfbuzz=true -Dpixman=true -Dhyphen=true \
-Dvnc-server=true -Dbindings=luajit,cxx,mono -Delogind=false -Dinstall-eo-files=true -Dphysics=true"
# Enabled png, jpeg evas loader for in tree edje file builds
@@ -27,8 +26,8 @@ if [ "$DISTRO" != "" ] ; then
-Dcrypto=gnutls -Dglib=false -Dgstreamer=false -Dsystemd=false -Dpulseaudio=false \
-Dnetwork-backend=connman -Dxinput2=false -Dtslib=false \
-Devas-loaders-disabler=gst,pdf,ps,raw,svg,xcf,bmp,dds,eet,generic,gif,ico,jp2k,json,pmaps,psd,tga,tgv,tiff,wbmp,webp,xpm \
-Decore-imf-loaders-disabler=xim,ibus,scim -Demotion-loaders-disabler=gstreamer1,libvlc,xine \
-Demotion-generic-loaders-disabler=vlc -Dfribidi=false -Dfontconfig=false \
-Decore-imf-loaders-disabler=xim,ibus,scim \
-Dfribidi=false -Dfontconfig=false \
-Dedje-sound-and-video=false -Dembedded-lz4=false -Dlibmount=false -Dv4l2=false \
-Delua=true -Dnls=false -Dbindings= -Dlua-interpreter=luajit -Dnative-arch-optimization=false"
#evas_filter_parser.c:(.text+0xc59): undefined reference to `lua_getglobal' with interpreter lua
@@ -98,7 +97,7 @@ elif [ "$TRAVIS_OS_NAME" = "osx" ]; then
export PKG_CONFIG_PATH="/usr/local/opt/openssl/lib/pkgconfig:/usr/local/Cellar/libffi/$LIBFFI_VER/lib/pkgconfig"
export CC="ccache gcc"
travis_fold meson meson
mkdir build && meson build -Dopengl=full -Decore-imf-loaders-disabler=scim,ibus -Dx11=false -Davahi=false -Deeze=false -Dsystemd=false -Dnls=false -Dcocoa=true -Demotion-loaders-disabler=gstreamer1,libvlc,xine
mkdir build && meson build -Dopengl=full -Decore-imf-loaders-disabler=scim,ibus -Dx11=false -Davahi=false -Deeze=false -Dsystemd=false -Dnls=false -Dcocoa=true -Dgstreamer=false
travis_endfold meson
else
travis_fold meson meson

View File

@@ -1,4 +1,4 @@
#!/bin/sh
sudo apt-get update -y
sudo apt-get install -y build-essential autoconf automake autopoint doxygen check luajit libharfbuzz-dev libpng-dev libudev-dev libwebp-dev libssl-dev libluajit-5.1-dev libfribidi-dev libcogl-gles2-dev libgif-dev libtiff5-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libdbus-1-dev libmount-dev libblkid-dev libpulse-dev libxrandr-dev libxtst-dev libxcursor-dev libxcomposite-dev libxinerama-dev libxkbfile-dev libbullet-dev libvlc-dev libsndfile1-dev libraw-dev libspectre-dev libpoppler-cpp-dev libpam0g-dev liblz4-dev faenza-icon-theme gettext git imagemagick libasound2-dev libbluetooth-dev libfontconfig1-dev libfreetype6-dev libibus-1.0-dev libiconv-hook-dev libjpeg-dev libjpeg-turbo8-dev libpoppler-dev libpoppler-private-dev libproxy-dev librsvg2-dev libscim-dev libsystemd-dev libtool libudisks2-dev libunibreak-dev libxcb-keysyms1-dev libxine2-dev libxss-dev linux-tools-common libcurl4-openssl-dev systemd ccache git binutils-gold python3-pip ninja-build dbus-x11 libavahi-client-dev python3-setuptools libopenjp2-7-dev
sudo apt-get install -y build-essential autoconf automake autopoint doxygen check luajit libharfbuzz-dev libpng-dev libudev-dev libwebp-dev libssl-dev libluajit-5.1-dev libfribidi-dev libcogl-gles2-dev libgif-dev libtiff5-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libdbus-1-dev libmount-dev libblkid-dev libpulse-dev libxrandr-dev libxtst-dev libxcursor-dev libxcomposite-dev libxinerama-dev libxkbfile-dev libbullet-dev libsndfile1-dev libraw-dev libspectre-dev libpoppler-cpp-dev libpam0g-dev liblz4-dev faenza-icon-theme gettext git imagemagick libasound2-dev libbluetooth-dev libfontconfig1-dev libfreetype6-dev libibus-1.0-dev libiconv-hook-dev libjpeg-dev libjpeg-turbo8-dev libpoppler-dev libpoppler-private-dev libproxy-dev librsvg2-dev libscim-dev libsystemd-dev libtool libudisks2-dev libunibreak-dev libxcb-keysyms1-dev libxss-dev linux-tools-common libcurl4-openssl-dev systemd ccache git binutils-gold python3-pip ninja-build dbus-x11 libavahi-client-dev python3-setuptools libopenjp2-7-dev
sudo pip3 install meson

View File

@@ -456,9 +456,6 @@ endforeach
subdir(join_paths('src', 'bin', 'efl'))
subdir(join_paths('src', 'generic', 'evas'))
if sys_windows == false
subdir(join_paths('src', 'generic', 'emotion'))
endif
subdir('cmakeconfig')
subdir(join_paths('src', 'bindings'))
subdir(join_paths('src', 'edje_external'))

View File

@@ -122,7 +122,7 @@ option('g-mainloop',
option('gstreamer',
type : 'boolean',
value : true,
description : 'GStreamer 1.0+ support in efl'
description : 'GStreamer support in efl'
)
option('systemd',
@@ -200,20 +200,6 @@ option('ecore-imf-loaders-disabler',
value : ['ibus']
)
option('emotion-loaders-disabler',
type : 'array',
description : 'List of video back-ends to disable in efl',
choices : ['gstreamer1', 'libvlc', 'xine'],
value : ['libvlc', 'xine']
)
option('emotion-generic-loaders-disabler',
type : 'array',
description : 'List of out-of-process generic binary video loaders to disable in efl',
choices : ['vlc'],
value : ['vlc']
)
option('harfbuzz',
type : 'boolean',
value : true,

View File

@@ -1,8 +0,0 @@
generic_loaders = ['vlc']
foreach loader : generic_loaders
if get_option('emotion-generic-loaders-disabler').contains(loader) == false
subdir(loader)
endif
endforeach

View File

@@ -1,789 +0,0 @@
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <pthread.h>
#include <poll.h>
#include <signal.h>
#include <vlc/vlc.h>
#include <Emotion_Generic_Plugin.h>
#include <Eina.h>
#include <Ecore.h>
static int _em_vlc_log_dom = -1;
#define ERR(...) EINA_LOG_DOM_ERR(_em_vlc_log_dom, __VA_ARGS__)
#define DBG(...) EINA_LOG_DOM_DBG(_em_vlc_log_dom, __VA_ARGS__)
#define INF(...) EINA_LOG_DOM_INFO(_em_vlc_log_dom, __VA_ARGS__)
#define WRN(...) EINA_LOG_DOM_WARN(_em_vlc_log_dom, __VA_ARGS__)
#define CRIT(...) EINA_LOG_DOM_CRIT(_em_vlc_log_dom, __VA_ARGS__)
typedef struct _App App;
struct _App {
Emotion_Generic_Video_Shared *vs;
Emotion_Generic_Video_Frame vf;
libvlc_instance_t *libvlc;
libvlc_media_t *m;
libvlc_media_player_t *mp;
libvlc_event_manager_t *event_mgr;
Ecore_Pipe *fd_read; // read commands from emotion here
Ecore_Pipe *fd_write; // write commands for emotion here
Eina_Lock cmd_mutex;// lock used to send just one command at a time
int last_order; // current command received from emotion
char *filename;
char *subtitle_path;
char *shmname;
unsigned w, h;
int volume;
Eina_Bool audio_muted;
Eina_Bool opening;
Eina_Bool closing;
Eina_Bool playing;
Eina_Bool inited;
};
static void _player_setup(App *app);
/* Utilities to send commands back to emotion */
#define SEND_CMD_PARAM(app, i) \
if ((app)->fd_write) \
if (!ecore_pipe_write((app)->fd_write, &(i), sizeof((i)))) \
ecore_main_loop_quit();
static void
_send_cmd(App *app, int cmd)
{
if (!app->fd_write)
return;
eina_lock_take(&app->cmd_mutex); /* LOCK HERE */
if (!ecore_pipe_write(app->fd_write, &cmd, sizeof(cmd)))
ecore_main_loop_quit();
}
static void
_send_cmd_str(App *app, const char *str)
{
int len;
len = str ? strlen(str) + 1 : 0;
if (app->fd_write)
if (!ecore_pipe_write(app->fd_write, &len, sizeof(len)))
ecore_main_loop_quit();
if (app->fd_write)
if (!ecore_pipe_write(app->fd_write, str, len))
ecore_main_loop_quit();
}
static void
_send_cmd_finish(App *app)
{
eina_lock_release(&app->cmd_mutex); /* UNLOCK HERE */
}
/* Commands sent to the emotion pipe */
static void
_send_file_closed(App *app)
{
_send_cmd(app, EM_RESULT_FILE_CLOSE);
_send_cmd_finish(app);
}
static void
_send_time_changed(App *app)
{
float new_time;
if (app->vs->frame_drop > 1)
return;
new_time = libvlc_media_player_get_time(app->mp);
new_time /= 1000;
_send_cmd(app, EM_RESULT_POSITION_CHANGED);
SEND_CMD_PARAM(app, new_time);
_send_cmd_finish(app);
}
static void
_send_resize(App *app, int width, int height)
{
_send_cmd(app, EM_RESULT_FRAME_SIZE);
SEND_CMD_PARAM(app, width);
SEND_CMD_PARAM(app, height);
_send_cmd_finish(app);
}
static void
_send_track_info(App *app, int cmd, int current, int count, libvlc_track_description_t *desc)
{
_send_cmd(app, cmd);
SEND_CMD_PARAM(app, current);
SEND_CMD_PARAM(app, count);
while (desc)
{
int tid = desc->i_id;
const char *name = desc->psz_name;
SEND_CMD_PARAM(app, tid);
_send_cmd_str(app, name);
desc = desc->p_next;
}
_send_cmd_finish(app);
}
static void
_send_all_track_info(App *app)
{
int track_count, current;
libvlc_track_description_t *desc;
current = libvlc_audio_get_track(app->mp);
track_count = libvlc_audio_get_track_count(app->mp);
desc = libvlc_audio_get_track_description(app->mp);
_send_track_info(app, EM_RESULT_AUDIO_TRACK_INFO,
current, track_count, desc);
current = libvlc_video_get_track(app->mp);
track_count = libvlc_video_get_track_count(app->mp);
desc = libvlc_video_get_track_description(app->mp);
_send_track_info(app, EM_RESULT_VIDEO_TRACK_INFO,
current, track_count, desc);
current = libvlc_video_get_spu(app->mp);
track_count = libvlc_video_get_spu_count(app->mp);
desc = libvlc_video_get_spu_description(app->mp);
_send_track_info(app, EM_RESULT_SPU_TRACK_INFO,
current, track_count, desc);
}
static void
_send_all_meta_info(App *app)
{
const char *meta;
_send_cmd(app, EM_RESULT_META_INFO);
/*
* Will send in this order: title, artist, album, year,
* genre, comments, disc id and track count.
*/
meta = libvlc_media_get_meta(app->m, libvlc_meta_Title);
_send_cmd_str(app, meta);
meta = libvlc_media_get_meta(app->m, libvlc_meta_Artist);
_send_cmd_str(app, meta);
meta = libvlc_media_get_meta(app->m, libvlc_meta_Album);
_send_cmd_str(app, meta);
meta = libvlc_media_get_meta(app->m, libvlc_meta_Date);
_send_cmd_str(app, meta);
meta = libvlc_media_get_meta(app->m, libvlc_meta_Genre);
_send_cmd_str(app, meta);
meta = NULL; // sending empty comments
_send_cmd_str(app, meta);
meta = NULL; // sending empty disc id
_send_cmd_str(app, meta);
meta = libvlc_media_get_meta(app->m, libvlc_meta_TrackNumber);
_send_cmd_str(app, meta);
_send_cmd_finish(app);
}
static void
_send_length_changed(App *app)
{
float length = libvlc_media_player_get_length(app->mp);
length /= 1000;
_send_cmd(app, EM_RESULT_LENGTH_CHANGED);
SEND_CMD_PARAM(app, length);
_send_cmd_finish(app);
}
static void
_send_seekable_changed(App *app, const struct libvlc_event_t *ev)
{
int seekable = ev->u.media_player_seekable_changed.new_seekable;
_send_cmd(app, EM_RESULT_SEEKABLE_CHANGED);
SEND_CMD_PARAM(app, seekable);
_send_cmd_finish(app);
}
static void
_send_playback_started(App *app)
{
_send_cmd(app, EM_RESULT_PLAYBACK_STARTED);
_send_cmd_finish(app);
}
static void
_send_playback_stopped(App *app)
{
_send_cmd(app, EM_RESULT_PLAYBACK_STOPPED);
_send_cmd_finish(app);
}
static void
_send_init(App *app)
{
_send_cmd(app, EM_RESULT_INIT);
_send_cmd_finish(app);
}
static void
_send_file_set(App *app)
{
_send_cmd(app, EM_RESULT_FILE_SET);
_send_cmd_finish(app);
}
static void
_send_file_set_done(App *app, int success)
{
_send_cmd(app, EM_RESULT_FILE_SET_DONE);
SEND_CMD_PARAM(app, success);
_send_cmd_finish(app);
}
/* VLC events and callbacks */
static void
_event_cb(const struct libvlc_event_t *ev, void *data)
{
App *app = data;
ecore_thread_main_loop_begin();
switch (ev->type)
{
case libvlc_MediaPlayerTimeChanged:
// DBG("libvlc_MediaPlayerTimeChanged");
_send_time_changed(app);
break;
case libvlc_MediaPlayerLengthChanged:
DBG("libvlc_MediaPlayerLengthChanged");
_send_length_changed(app);
break;
case libvlc_MediaPlayerSeekableChanged:
DBG("libvlc_MediaPlayerSeekableChanged");
_send_seekable_changed(app, ev);
break;
case libvlc_MediaPlayerPlaying:
DBG("libvlc_MediaPlayerPlaying");
libvlc_audio_set_volume(app->mp, app->volume);
libvlc_audio_set_mute(app->mp, app->audio_muted);
_send_playback_started(app);
break;
case libvlc_MediaPlayerStopped:
DBG("libvlc_MediaPlayerStopped");
_send_playback_stopped(app);
if (app->closing)
{
free(app->filename);
app->filename = NULL;
free(app->subtitle_path);
app->subtitle_path = NULL;
libvlc_media_release(app->m);
app->m = NULL;
libvlc_media_player_release(app->mp);
app->mp = NULL;
emotion_generic_shm_free(app->vs);
app->playing = EINA_FALSE;
app->closing = EINA_FALSE;
_send_file_closed(app);
}
break;
case libvlc_MediaPlayerEndReached:
DBG("libvlc_MediaPlayerEndReached");
app->playing = EINA_FALSE;
/* vlc has already released the media_player here, so we create a new one */
app->mp = libvlc_media_player_new_from_media(app->m);
_player_setup(app);
_send_playback_stopped(app);
break;
}
ecore_thread_main_loop_end();
}
static void
_tmp_playing_event_cb(const struct libvlc_event_t *ev, void *data)
{
App *app = data;
if (ev->type != libvlc_MediaPlayerPlaying)
return;
/* pause and stop listening to the temporary event */
libvlc_event_detach(app->event_mgr,libvlc_MediaPlayerPlaying,
_tmp_playing_event_cb, app);
libvlc_media_player_set_pause(app->mp, 1);
/* sending size info */
libvlc_video_get_size(app->mp, 0, &app->w, &app->h);
_send_resize(app, app->w, app->h);
/* sending total length */
_send_length_changed(app);
/* sending audio track info */
_send_all_track_info(app);
/* sending meta info */
_send_all_meta_info(app);
/* ok, we are done! Now let emotion create the shmem for us */
_send_file_set(app);
}
static void *
_lock(void *data, void **pixels)
{
App *app = data;
if (app->playing)
*pixels = app->vf.frames[app->vs->frame.player];
else
*pixels = NULL;
return NULL; // picture identifier, not needed here
}
static void
_unlock(void *data EINA_UNUSED, void *id EINA_UNUSED, void *const *pixels EINA_UNUSED)
{
}
static void
_display(void *data, void *id EINA_UNUSED)
{
App *app = data;
if (!app->playing)
return;
eina_semaphore_lock(&app->vs->lock);
app->vs->frame.last = app->vs->frame.player;
app->vs->frame.player = app->vs->frame.next;
app->vs->frame.next = app->vs->frame.last;
if (!app->vs->frame_drop++)
{
_send_cmd(app, EM_RESULT_FRAME_NEW);
_send_cmd_finish(app);
}
eina_semaphore_release(&app->vs->lock, 1);
}
static void
_player_setup(App *app)
{
libvlc_video_set_format(app->mp, "RV32", app->w, app->h, app->w * 4);
libvlc_video_set_callbacks(app->mp, _lock, _unlock, _display, app);
app->event_mgr = libvlc_media_player_event_manager(app->mp);
libvlc_event_attach(app->event_mgr, libvlc_MediaPlayerPlaying,
_event_cb, app);
libvlc_event_attach(app->event_mgr, libvlc_MediaPlayerTimeChanged,
_event_cb, app);
libvlc_event_attach(app->event_mgr, libvlc_MediaPlayerLengthChanged,
_event_cb, app);
libvlc_event_attach(app->event_mgr, libvlc_MediaPlayerSeekableChanged,
_event_cb, app);
libvlc_event_attach(app->event_mgr, libvlc_MediaPlayerEndReached,
_event_cb, app);
libvlc_event_attach(app->event_mgr, libvlc_MediaPlayerStopped,
_event_cb, app);
}
/* Commands received from the emotion pipe */
static void
_file_set(App *app)
{
DBG("Path: %s", app->filename);
app->m = libvlc_media_new_path(app->libvlc, app->filename);
if (!app->m)
{
ERR("could not open path: \"%s\"", app->filename);
return;
}
app->mp = libvlc_media_player_new_from_media(app->m);
if (!app->mp)
{
ERR("could not create new player from media.");
return;
}
app->opening = EINA_TRUE;
/* Here we start playing and connect a temporary callback to know when
* the file is parsed and ready to be played for real.
*/
app->event_mgr = libvlc_media_player_event_manager(app->mp);
libvlc_event_attach(app->event_mgr, libvlc_MediaPlayerPlaying,
_tmp_playing_event_cb, app);
libvlc_media_player_play(app->mp);
}
static void
_file_set_done(App *app)
{
int r;
DBG("Path: %s", app->filename);
app->opening = EINA_FALSE;
r = emotion_generic_shm_get(app->shmname, &app->vs, &app->vf);
if (!r)
{
free(app->filename);
libvlc_media_release(app->m);
libvlc_media_player_release(app->mp);
app->filename = NULL;
app->m = NULL;
app->mp = NULL;
}
else
{
_player_setup(app);
}
_send_file_set_done(app, r);
}
static void
_file_close(App *app)
{
DBG("closing file");
if (!app->mp)
return;
app->closing = EINA_TRUE;
libvlc_media_player_stop(app->mp);
}
static void
_stop(App *app)
{
DBG("Stop");
if (app->mp)
libvlc_media_player_set_pause(app->mp, 1);
}
static void
_play(App *app, float pos)
{
DBG("Play at %.3f", pos);
if (!app->mp)
return;
if (app->playing)
{
libvlc_media_player_set_pause(app->mp, 0);
}
else
{
libvlc_time_t new_time = pos * 1000;
libvlc_media_player_set_time(app->mp, new_time);
libvlc_media_player_play(app->mp);
if (app->subtitle_path)
libvlc_video_set_subtitle_file(app->mp, app->subtitle_path);
app->playing = EINA_TRUE;
}
}
static void
_position_set(App *app, float position)
{
libvlc_time_t new_time;
DBG("Position set %.3f", position);
if (!app->mp)
return;
new_time = position * 1000;
libvlc_media_player_set_time(app->mp, new_time);
if (libvlc_media_player_get_state(app->mp) == libvlc_Paused)
_send_time_changed(app);
}
static void
_speed_set(App *app, float rate)
{
DBG("Speed set %.3f", rate);
if (!app->mp)
return;
libvlc_media_player_set_rate(app->mp, rate);
}
static void
_mute_set(App *app, int mute)
{
DBG("Mute %d", mute);
if (!app->mp)
return;
app->audio_muted = mute;
libvlc_audio_set_mute(app->mp, mute);
}
static void
_volume_set(App *app, float volume)
{
DBG("Volume set %.2f", volume);
if (!app->mp)
return;
app->volume = volume * 100;
libvlc_audio_set_volume(app->mp, app->volume);
}
static void
_spu_track_set(App *app, int track)
{
DBG("SPU track %d", track);
libvlc_video_set_spu(app->mp, track);
}
static void
_audio_track_set(App *app, int track)
{
DBG("Audio track %d", track);
libvlc_audio_set_track(app->mp, track);
}
static void
_video_track_set(App *app, int track)
{
DBG("Video Track %d", track);
libvlc_video_set_track(app->mp, track);
}
static void
_remote_command(void *data, void *buffer, unsigned int nbyte)
{
App *app = data;
if (nbyte == 0)
{
ecore_main_loop_quit();
return ;
}
if (app->last_order == EM_CMD_LAST)
{
if (nbyte != sizeof (int))
{
ERR("didn't receive a valid command from emotion (%i) !", nbyte);
ecore_main_loop_quit();
return ;
}
app->last_order = *((int*) buffer);
if (!app->inited &&
app->last_order != EM_CMD_INIT)
{
ERR("wrong init command!");
ecore_main_loop_quit();
return ;
}
switch (app->last_order)
{
case EM_CMD_FILE_SET:
if (app->opening)
{
libvlc_media_release(app->m);
libvlc_media_player_release(app->mp);
free(app->filename);
app->opening = EINA_FALSE;
}
break;
case EM_CMD_FILE_SET_DONE:
_file_set_done(app);
app->last_order = EM_CMD_LAST;
break;
case EM_CMD_FILE_CLOSE:
_file_close(app);
app->last_order = EM_CMD_LAST;
break;
case EM_CMD_STOP:
_stop(app);
app->last_order = EM_CMD_LAST;
break;
}
}
else
{
switch (app->last_order)
{
case EM_CMD_INIT:
app->shmname = strdup(buffer);
app->inited = EINA_TRUE;
_send_init(app);
break;
case EM_CMD_FILE_SET:
app->filename = strdup(buffer);
_file_set(app);
break;
case EM_CMD_SUBTITLE_SET:
app->subtitle_path = strdup(buffer);
break;
case EM_CMD_PLAY:
_play(app, *(float*) buffer);
break;
case EM_CMD_POSITION_SET:
_position_set(app, *(float*) buffer);
break;
case EM_CMD_SPEED_SET:
_speed_set(app, *(float*) buffer);
break;
case EM_CMD_AUDIO_MUTE_SET:
_mute_set(app, *(int*) buffer);
break;
case EM_CMD_VOLUME_SET:
_volume_set(app, *(float*) buffer);
break;
case EM_CMD_SPU_TRACK_SET:
_spu_track_set(app, *(int*) buffer);
break;
case EM_CMD_AUDIO_TRACK_SET:
_audio_track_set(app, *(int*) buffer);
break;
case EM_CMD_VIDEO_TRACK_SET:
_video_track_set(app, *(int*) buffer);
break;
}
app->last_order = EM_CMD_LAST;
}
}
static void
_dummy(void *data EINA_UNUSED, void *buffer EINA_UNUSED, unsigned int nbyte EINA_UNUSED)
{
/* This function is useless for the pipe we use to send messages back
to emotion, but it is still needed */
}
/* Main */
static Eina_Bool
exit_func(void *data EINA_UNUSED, int ev_type EINA_UNUSED, void *ev EINA_UNUSED)
{
DBG("Quit signal received !");
ecore_main_loop_quit();
return EINA_TRUE;
}
int
main(int argc, const char *argv[])
{
App app;
Ecore_Event_Handler *hld;
int vlc_argc;
const char *vlc_argv[] =
{
"--quiet",
"--intf", "dummy", /* no interface */
"--vout", "dummy", /* we don't want video (output) */
"--no-video-title-show", /* nor the filename displayed */
"--no-sub-autodetect-file", /* we don't want automatic subtitles */
"--no-stats", /* no stats */
"--no-inhibit", /* we don't want interfaces */
"--no-disable-screensaver", /* we don't want interfaces */
// XXX: causes newer vlcs to segv!
// "--codec", "avcodec",
// XXX: disable this just in case
// "--demux", "avformat"
};
vlc_argc = sizeof(vlc_argv) / sizeof(*vlc_argv);
memset(&app, 0, sizeof(app));
if (!eina_init())
{
EINA_LOG_CRIT("Can't initialize generic vlc player, eina failed.");
return -1;
}
_em_vlc_log_dom = eina_log_domain_register("emotion_generic_vlc",
EINA_COLOR_CYAN);
if (_em_vlc_log_dom < 0)
{
EINA_LOG_CRIT("Unable to register emotion_generic_vlc log domain.");
goto error;
}
if (!eina_log_domain_level_check(_em_vlc_log_dom, EINA_LOG_LEVEL_WARN))
eina_log_domain_level_set("emotion_generic_vlc", EINA_LOG_LEVEL_WARN);
if (argc < 3)
{
ERR("missing parameters.");
ERR("syntax:\n\t%s <fd read> <fd write>", argv[0]);
goto error;
}
ecore_init();
eina_lock_new(&app.cmd_mutex);
app.fd_read = ecore_pipe_full_add(_remote_command, &app,
atoi(argv[1]), -1, EINA_FALSE, EINA_FALSE);
app.fd_write = ecore_pipe_full_add(_dummy, NULL,
-1, atoi(argv[2]), EINA_FALSE, EINA_FALSE);
hld = ecore_event_handler_add(ECORE_EVENT_SIGNAL_HUP, exit_func, NULL);
app.libvlc = libvlc_new(vlc_argc, vlc_argv);
app.mp = NULL;
app.filename = NULL;
app.subtitle_path = NULL;
app.w = 0;
app.h = 0;
app.opening = EINA_FALSE;
app.playing = EINA_FALSE;
app.inited = EINA_FALSE;
app.last_order = EM_CMD_LAST;
ecore_main_loop_begin();
libvlc_release(app.libvlc);
ecore_pipe_del(app.fd_read);
ecore_pipe_del(app.fd_write);
ecore_event_handler_del(hld);
eina_lock_free(&app.cmd_mutex);
ecore_shutdown();
eina_shutdown();
return 0;
error:
eina_shutdown();
return -1;
}
#undef SEND_CMD_PARAM

View File

@@ -1,8 +0,0 @@
vlc = dependency('libvlc')
executable('vlc',
'emotion_generic_vlc.c',
dependencies: [emotion_generic, eina, ecore, rt, vlc],
install: true,
install_dir: join_paths(dir_lib, 'emotion', 'generic_players', version_name)
)

View File

@@ -1,8 +1,8 @@
generic_loaders = ['gst', 'pdf',
'ps',
'raw',
'rsvg',
'xcf']
generic_loaders = [ 'pdf', 'ps', 'raw', 'rsvg', 'xcf' ]
if get_option('gstreamer') == true
generic_loaders += [ 'gst' ]
endif
generic_src = []
generic_deps = []

View File

@@ -1,148 +0,0 @@
#ifndef EMOTION_GENERIC_PLUGIN_H
#define EMOTION_GENERIC_PLUGIN_H
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <Eina.h>
#define DEFAULTWIDTH 320
#define DEFAULTHEIGHT 240
#define DEFAULTPITCH 4
typedef enum _Emotion_Generic_Cmd Emotion_Generic_Cmd;
typedef enum _Emotion_Generic_Result Emotion_Generic_Result;
typedef struct _Emotion_Generic_Video_Frame Emotion_Generic_Video_Frame;
typedef struct _Emotion_Generic_Video_Shared Emotion_Generic_Video_Shared;
enum _Emotion_Generic_Cmd
{
EM_CMD_INIT = 0, // 0 param: shared memory identifier (string)
EM_CMD_PLAY, // 1 param: position (float)
EM_CMD_STOP, // 2 param: none
EM_CMD_FILE_SET, // 3 param: filename (string)
EM_CMD_FILE_SET_DONE, // 4 param: none
EM_CMD_FILE_CLOSE, // 5 param: none
EM_CMD_POSITION_SET, // 6 param: position (float)
EM_CMD_SPEED_SET, // 7 param: speed (float)
EM_CMD_AUDIO_MUTE_SET, // 8 param: muted (int)
EM_CMD_VIDEO_MUTE_SET, // 9 param: muted (int)
EM_CMD_SPU_MUTE_SET, // 10 param: muted (int)
EM_CMD_VOLUME_SET, // 11 param: volume (float)
EM_CMD_AUDIO_TRACK_SET, // 12 param: track id (int)
EM_CMD_VIDEO_TRACK_SET, // 13 param: track id (int)
EM_CMD_SPU_TRACK_SET, // 14 param: track id (int)
EM_CMD_SUBTITLE_SET, // 15 param: subtitle filename (string)
EM_CMD_LAST
};
enum _Emotion_Generic_Result
{
EM_RESULT_INIT = 0, // param: none
EM_RESULT_FILE_SET, // param: none
EM_RESULT_FILE_SET_DONE, // param: success (int)
EM_RESULT_PLAYBACK_STARTED, // param: none
EM_RESULT_PLAYBACK_STOPPED, // param: none
EM_RESULT_FILE_CLOSE, // param: none
EM_RESULT_FRAME_NEW, // param: none
EM_RESULT_FRAME_SIZE, // param: int, int (width, height)
EM_RESULT_LENGTH_CHANGED, // param: float
EM_RESULT_POSITION_CHANGED, // param: float
EM_RESULT_SEEKABLE_CHANGED, // param: int
EM_RESULT_AUDIO_TRACK_INFO, // param: current track, track count, track_id, track_name, track_id2, track_name2, ...
EM_RESULT_VIDEO_TRACK_INFO, // param: current track, track count, track_id, track_name, track_id2, track_name2, ...
EM_RESULT_SPU_TRACK_INFO, // param: current spu, spu count, spu_id, spu_name, spu_id2, spu_name2, ...
// (int, int, int, string, int, string, ...)
EM_RESULT_META_INFO, // param: title, artist, album, year, genre, comments, disc id, count (all int)
EM_RESULT_LAST
};
/* structure for frames 2 buffers to keep integrity */
struct _Emotion_Generic_Video_Frame
{
unsigned char *frames[3];
};
/* structure for frames 2 buffers to keep integrity */
struct _Emotion_Generic_Video_Shared
{
int size;
int width;
int height;
int pitch;
/**
* - "emotion" is the frame from where the Emotion process is reading pixels.
* The player shouldn't touch this frame.
* - "player" is the frame where the slave process is writing pixels.
* The emotion process shouldn't touch this frame.
* - "last" is the last frame that was rendered by the player. Emotion will
* use this frame the next time it will fetch pixels to Evas.
* - "next" is the unused frame. The player currently using the "player"
* should, after finishing this frame, set "last" to "player", and "player"
* to "next", and finally "next" to "last" so this operation can be done
* many times in case that Emotion does not request pixels fast enough.
*/
struct {
int emotion;
int player;
int last;
int next;
} frame;
Eina_Semaphore lock;
int frame_drop;
};
static inline int
emotion_generic_shm_get(const char *shmname, Emotion_Generic_Video_Shared **vs, Emotion_Generic_Video_Frame *vf)
{
int shmfd = -1;
int size;
Emotion_Generic_Video_Shared *t_vs;
shmfd = shm_open(shmname, O_RDWR, 0700);
if (shmfd == -1)
{
fprintf(stderr, "player: could not open shm: %s: %s\n",
shmname, strerror(errno));
return 0;
}
t_vs = mmap(NULL, sizeof(*t_vs), PROT_READ|PROT_WRITE, MAP_SHARED, shmfd, 0);
if (t_vs == MAP_FAILED)
{
fprintf(stderr, "player: could not map shared memory: %s\n",
strerror(errno));
close(shmfd);
return 0;
}
size = t_vs->size;
munmap(t_vs, sizeof(*t_vs));
t_vs = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, shmfd, 0);
if (t_vs == MAP_FAILED)
{
fprintf(stderr, "player: could not map shared memory: %s\n",
strerror(errno));
close(shmfd);
return 0;
}
close(shmfd);
vf->frames[0] = (unsigned char *)t_vs + sizeof(*t_vs);
vf->frames[1] = (unsigned char *)t_vs + sizeof(*t_vs) + t_vs->height * t_vs->width * t_vs->pitch;
vf->frames[2] = (unsigned char *)t_vs + sizeof(*t_vs) + 2 * t_vs->height * t_vs->width * t_vs->pitch;
*vs = t_vs;
return 1;
}
static inline void
emotion_generic_shm_free(Emotion_Generic_Video_Shared *vs)
{
munmap(vs, vs->size);
}
#endif // EMOTION_GENERIC_PLUGIN_H
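The comment on _Emotion_Generic_Video_Shared above describes how a player rotates the three shared frames. Purely for illustration, a minimal sketch of that rotation against the fields this header defines might look like the following; the helper name player_frame_done is made up here, and it simply mirrors what the libvlc player's _display() callback did before this commit removed it.

#include <Eina.h>
#include "Emotion_Generic_Plugin.h"

/* Hypothetical helper: called by a player once it has finished writing one
 * frame into vs->frame.player, applying the swap described in the struct
 * comment above. */
static void
player_frame_done(Emotion_Generic_Video_Shared *vs)
{
   eina_semaphore_lock(&vs->lock);
   vs->frame.last   = vs->frame.player; /* the frame just finished */
   vs->frame.player = vs->frame.next;   /* write into the spare frame next */
   vs->frame.next   = vs->frame.last;   /* old frame becomes the spare */
   vs->frame_drop++;                    /* presumably reset by emotion when it fetches the frame */
   eina_semaphore_release(&vs->lock, 1);
}

The semaphore is the only synchronization between the two processes, which is also one of the open questions raised in the README further down.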

View File

@@ -1,79 +0,0 @@
Generic - emotion backend
=========================
This generic player backend executes a separate player in another
process. It receives the bytes to be drawn on the emotion object through
shared memory, and communicates with the player through a pipe using the
player's standard input/output.
The player must communicate with emotion using the commands defined in
Emotion_Generic_Plugin.h. It doesn't need to link against emotion; just
include this file for an easier implementation.
How does it work?
=================
When the module is initialized for an emotion object, it starts another process
that runs the specified player. The player command line is specified using:
emotion_object_module_option_set(object, "player", <command>);
A player using libvlc is provided, and the generic module internally checks
whether the command given was "vlc", in which case it uses this bundled vlc
player.
When a file is set on this object, the module sends the file name to the player
and expects an answer telling it that the player has already decoded a bit of
the file and that the video size has been set on the module, so it can allocate
a shared memory segment of the correct size.
The module then allocates the memory, sends a message to the player and expects
an answer. After this last answer, the "open_done" signal is sent and the module
knows that it is ready for playing. Commands issued before the module was ready
are then applied (and playback is resumed if necessary).
During this setup stage, info about the file that was set is stored in the
module, so commands like meta data get and length get are available to
synchronous calls like emotion_object_play_length_get().
If the player dies for any reason, a "decode_stop" signal is sent (this should
change to something more like an error signal), and if play is called again,
the player will be restarted. Playback should resume from the same point it was
at before the player crashed, if the player supports seeking on the current
media format.
TODO
====
- Provide better descriptions for the commands;
- Explain the emotion <-> player communication in detail;
- Make more common functions for players;
- (maybe) add support for named pipes, so we don't rely on standard in/out
for communication;
- Add detection in the player for the emotion process dying (so it can just
exit);
- shmname should contain the child pid too;
- better names for commands, maybe add a namespace everywhere;
questions
=========
- Using semaphores to lock the critical region between processes, and pthread
mutexes for the threads inside the player. Should we move to only one type
(semaphores or mutexes)?
- There are 2 inline functions inside Emotion_Generic_Plugin.h to make it easier
for the player to get the shared memory correctly. Any problem with this?
Would it be good to add more functions/macros there for common tasks like
parsing commands too?
- Should the players move to another project (outside of emotion)?
problems
========
- file_set has a critical window when the file is not set yet, during which we
can't call some functions (I think only another file_set now);
- communication player -> emotion depends on '\n' to delimit commands; this
will be removed soon (fix this urgently!);
- need to implement missing APIs;
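For context, the emotion_object_module_option_set() call mentioned in this README was used from application code roughly as follows. This is only a sketch against the public Emotion API as it stood before this commit; the helper name generic_video_open is invented for the example, and the "generic" engine it selects is exactly what this commit removes.

#include <Evas.h>
#include <Emotion.h>

/* Sketch: bind an emotion object to the generic engine and ask it to spawn
 * the bundled "vlc" player, as the README above describes. */
static Evas_Object *
generic_video_open(Evas *evas, const char *file)
{
   Evas_Object *video = emotion_object_add(evas);
   if (!emotion_object_init(video, "generic")) return NULL; /* select the generic backend */
   emotion_object_module_option_set(video, "player", "vlc"); /* player command to run */
   emotion_object_file_set(video, file);
   emotion_object_play_set(video, EINA_TRUE);
   return video;
}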

File diff suppressed because it is too large.

View File

@@ -1,123 +0,0 @@
#ifndef EMOTION_GENERIC_H
#define EMOTION_GENERIC_H
#include <sys/types.h>
#include "Emotion_Generic_Plugin.h"
/* default values */
typedef struct _Emotion_Generic_Video Emotion_Generic_Video;
typedef struct _Emotion_Generic_Player Emotion_Generic_Player;
typedef struct _Emotion_Generic_Cmd_Buffer Emotion_Generic_Cmd_Buffer;
typedef struct _Emotion_Generic_Channel Emotion_Generic_Channel;
typedef struct _Emotion_Generic_Meta Emotion_Generic_Meta;
struct _Emotion_Generic_Player
{
Ecore_Exe *exe;
};
struct _Emotion_Generic_Channel
{
int id;
const char *name;
};
struct _Emotion_Generic_Meta
{
const char *title;
const char *artist;
const char *album;
const char *year;
const char *genre;
const char *comment;
const char *disc_id;
const char *count;
};
struct _Emotion_Generic_Cmd_Buffer
{
char *tmp;
int type;
ssize_t i, total;
int s_len;
int num_params, cur_param;
int padding;
union {
struct {
int width;
int height;
} size;
int i_num;
float f_num;
struct {
int total;
int current;
Emotion_Generic_Channel *channels;
} track;
Emotion_Generic_Meta meta;
} param;
};
typedef struct _Emotion_Engine_Generic
{
Emotion_Engine engine;
char *path;
} Emotion_Engine_Generic;
/* emotion/generic main structure */
struct _Emotion_Generic_Video
{
const Emotion_Engine_Generic *engine;
const char *shmname;
Emotion_Generic_Player player;
Emotion_Generic_Cmd_Buffer cmd;
Ecore_Event_Handler *player_add, *player_del, *player_data;
int drop;
Ecore_Pipe *fd_read;
Ecore_Pipe *fd_write;
const unsigned char *buffer;
ssize_t length;
ssize_t offset;
const char *filename;
volatile double len;
volatile double pos;
double fps;
double ratio;
int w, h;
Evas_Object *obj;
Emotion_Generic_Video_Shared *shared;
Emotion_Generic_Video_Frame frame;
volatile int fq;
float volume;
float speed;
Emotion_Vis vis;
Eina_Bool initializing : 1;
Eina_Bool ready : 1;
Eina_Bool play : 1;
Eina_Bool video_mute : 1;
Eina_Bool audio_mute : 1;
Eina_Bool spu_mute : 1;
Eina_Bool seekable : 1;
volatile Eina_Bool opening : 1;
volatile Eina_Bool closing : 1;
Eina_Bool file_changed : 1;
Eina_Bool file_ready : 1;
int audio_channels_count;
int audio_channel_current;
Emotion_Generic_Channel *audio_channels;
int video_channels_count;
int video_channel_current;
Emotion_Generic_Channel *video_channels;
int spu_channels_count;
int spu_channel_current;
Emotion_Generic_Channel *spu_channels;
Emotion_Generic_Meta meta;
const char *subtitle_path;
};
#endif

View File

@@ -1,24 +0,0 @@
generic_src = files([
'emotion_generic.c',
'emotion_generic.h',
])
emotion_generic = declare_dependency(
include_directories: include_directories('.'),
dependencies: emotion,
)
if sys_windows == false
shared_module(emotion_loader,
generic_src,
include_directories : config_dir,
dependencies: [eina, evas, emotion, generic_deps, rt],
install: true,
install_dir : mod_install_dir,
c_args : package_c_args,
)
install_headers('Emotion_Generic_Plugin.h',
install_dir : dir_package_include,
)
endif

File diff suppressed because it is too large.

View File

@@ -1,14 +0,0 @@
generic_src = files([
'emotion_libvlc.c',
])
generic_deps = [dependency('libvlc', version: '>= 3.0')]
shared_module(emotion_loader,
generic_src,
include_directories : config_dir,
dependencies: [eina, evas, emotion, generic_deps],
install: true,
install_dir : mod_install_dir,
c_args : package_c_args,
)

View File

@@ -1,19 +1,11 @@
emotion_loaders = [
'gstreamer1',
'libvlc',
'xine'
]
if sys_windows == false
emotion_loaders += 'generic'
endif
emotion_loaders = [ 'gstreamer1' ]
foreach emotion_loader : emotion_loaders
generic_src = []
generic_deps = []
mod_install_dir = join_paths(dir_lib, 'emotion', 'modules', emotion_loader, version_name)
if get_option('emotion-loaders-disabler').contains(emotion_loader) == false
if get_option('gstreamer') == true
subdir(emotion_loader)
module_files += join_paths(mod_install_dir, 'lib'+emotion_loader+'.'+sys_mod_extension)
config_h.set('EMOTION_BUILD_'+emotion_loader.to_upper(), 1)

File diff suppressed because it is too large.

View File

@@ -1,118 +0,0 @@
#ifndef EMOTION_XINE_H
#define EMOTION_XINE_H
#include <xine.h>
#include <xine/xine_plugin.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
typedef struct _Emotion_Xine_Video Emotion_Xine_Video;
typedef struct _Emotion_Xine_Video_Frame Emotion_Xine_Video_Frame;
typedef struct _Emotion_Xine_Event Emotion_Xine_Event;
struct _Emotion_Xine_Video
{
xine_t *decoder;
xine_video_port_t *video;
xine_audio_port_t *audio;
xine_stream_t *stream;
xine_event_queue_t *queue;
volatile double len;
volatile double pos;
volatile double last_pos;
volatile double volume;
volatile double buffer;
double fps;
double ratio;
int w, h;
Evas_Object *obj;
volatile Emotion_Xine_Video_Frame *cur_frame;
volatile int get_poslen;
volatile int spu_channel;
volatile int audio_channel;
volatile int video_channel;
volatile int fq;
Emotion_Vis vis;
int fd_read;
int fd_write;
Ecore_Fd_Handler *fd_handler;
int fd_ev_read;
int fd_ev_write;
Ecore_Fd_Handler *fd_ev_handler;
Ecore_Animator *anim;
unsigned char play : 1;
unsigned char just_loaded : 1;
unsigned char video_mute : 1;
unsigned char audio_mute : 1;
unsigned char spu_mute : 1;
Eina_Bool opt_no_video : 1;
Eina_Bool opt_no_audio : 1;
volatile unsigned char delete_me : 1;
volatile unsigned char no_time : 1;
volatile unsigned char opening : 1;
volatile unsigned char closing : 1;
volatile unsigned char have_vo : 1;
volatile unsigned char play_ok : 1;
pthread_t get_pos_len_th;
pthread_cond_t get_pos_len_cond;
pthread_mutex_t get_pos_len_mutex;
pthread_t slave_th;
int fd_slave_read;
int fd_slave_write;
unsigned char get_pos_thread_deleted : 1;
};
struct _Emotion_Xine_Video_Frame
{
int w, h;
double ratio;
Emotion_Format format;
unsigned char *y, *u, *v;
unsigned char *bgra_data;
int y_stride, u_stride, v_stride;
Evas_Object *obj;
double timestamp;
void (*done_func)(void *data);
void *done_data;
void *frame;
};
struct _Emotion_Xine_Event
{
int type;
void *xine_event;
int mtype;
};
#ifdef DBG
#undef DBG
#endif
#define DBG(...) EINA_LOG_DOM_DBG(_emotion_xine_log_domain, __VA_ARGS__)
#ifdef INF
#undef INF
#endif
#define INF(...) EINA_LOG_DOM_INFO(_emotion_xine_log_domain, __VA_ARGS__)
#ifdef WRN
#undef WRN
#endif
#define WRN(...) EINA_LOG_DOM_WARN(_emotion_xine_log_domain, __VA_ARGS__)
#ifdef ERR
#undef ERR
#endif
#define ERR(...) EINA_LOG_DOM_ERR(_emotion_xine_log_domain, __VA_ARGS__)
#ifdef CRI
#undef CRI
#endif
#define CRI(...) EINA_LOG_DOM_CRIT(_emotion_xine_log_domain, __VA_ARGS__)
extern int _emotion_xine_log_domain;
#endif

View File

@@ -1,766 +0,0 @@
/***************************************************************************/
/*** emotion xine display engine ***/
/***************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <Eina.h>
#include <Evas.h>
#include <Ecore.h>
#include "emotion_modules.h"
#include "emotion_xine.h"
#include <xine.h>
#include <xine/video_out.h>
#include <xine/xine_internal.h>
#include <xine/xineutils.h>
#include <xine/vo_scale.h>
#define BLEND_BYTE(dst, src, o) (((src)*o + ((dst)*(0xf-o)))/0xf)
/***************************************************************************/
typedef struct _Emotion_Frame Emotion_Frame;
typedef struct _Emotion_Driver Emotion_Driver;
typedef struct _Emotion_Class Emotion_Class;
typedef struct _Emotion_Lut Emotion_Lut;
struct _Emotion_Frame
{
vo_frame_t vo_frame;
int width;
int height;
double ratio;
int format;
xine_t *xine;
Emotion_Xine_Video_Frame frame;
unsigned char in_use : 1;
};
struct _Emotion_Driver
{
vo_driver_t vo_driver;
config_values_t *config;
int ratio;
xine_t *xine;
Emotion_Xine_Video *ev;
};
struct _Emotion_Class
{
video_driver_class_t driver_class;
config_values_t *config;
xine_t *xine;
};
struct _Emotion_Lut
{
uint8_t cb : 8;
uint8_t cr : 8;
uint8_t y : 8;
uint8_t foo : 8;
} __attribute__ ((packed));
typedef void (*done_func_type)(void *data);
/***************************************************************************/
static void *_emotion_class_init (xine_t *xine, void *visual);
static void _emotion_class_dispose (video_driver_class_t *driver_class);
static char *_emotion_class_identifier_get (video_driver_class_t *driver_class);
static char *_emotion_class_description_get (video_driver_class_t *driver_class);
static vo_driver_t *_emotion_open (video_driver_class_t *driver_class, const void *visual);
static void _emotion_dispose (vo_driver_t *vo_driver);
static int _emotion_redraw (vo_driver_t *vo_driver);
static uint32_t _emotion_capabilities_get (vo_driver_t *vo_driver);
static int _emotion_gui_data_exchange (vo_driver_t *vo_driver, int data_type, void *data);
static int _emotion_property_set (vo_driver_t *vo_driver, int property, int value);
static int _emotion_property_get (vo_driver_t *vo_driver, int property);
static void _emotion_property_min_max_get (vo_driver_t *vo_driver, int property, int *min, int *max);
static vo_frame_t *_emotion_frame_alloc (vo_driver_t *vo_driver);
static void _emotion_frame_dispose (vo_frame_t *vo_frame);
static void _emotion_frame_format_update (vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags);
static void _emotion_frame_display (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
static void _emotion_frame_field (vo_frame_t *vo_frame, int which_field);
static void _emotion_frame_data_free (Emotion_Frame *fr);
static void _emotion_frame_data_unlock (Emotion_Frame *fr);
static void _emotion_overlay_begin (vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed);
static void _emotion_overlay_end (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
static void _emotion_overlay_blend (vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay);
static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);
static void _emotion_yuy2_to_bgra32 (int width, int height, unsigned char *src, unsigned char *dst);
/***************************************************************************/
static vo_info_t _emotion_info =
{
1, /* priority */
XINE_VISUAL_TYPE_NONE /* visual type */
};
plugin_info_t emotion_xine_plugin_info[] =
{
{ PLUGIN_VIDEO_OUT, 21, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
{ PLUGIN_VIDEO_OUT, 22, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
{ PLUGIN_NONE, 0, "", 0, NULL, NULL }
};
/***************************************************************************/
static void *
_emotion_class_init(xine_t *xine, void *visual EINA_UNUSED)
{
Emotion_Class *cl;
// DBG("");
cl = (Emotion_Class *) malloc(sizeof(Emotion_Class));
if (!cl) return NULL;
cl->driver_class.open_plugin = _emotion_open;
#if XINE_MAJOR_VERSION < 1 || (XINE_MAJOR_VERSION == 1 && XINE_MINOR_VERSION < 2)
cl->driver_class.get_identifier = _emotion_class_identifier_get;
cl->driver_class.get_description = _emotion_class_description_get;
#else
cl->driver_class.identifier = _emotion_class_identifier_get(NULL);
cl->driver_class.description = _emotion_class_description_get(NULL);
#endif
cl->driver_class.dispose = _emotion_class_dispose;
cl->config = xine->config;
cl->xine = xine;
return cl;
}
static void
_emotion_class_dispose(video_driver_class_t *driver_class)
{
Emotion_Class *cl;
cl = (Emotion_Class *)driver_class;
free(cl);
}
static char *
_emotion_class_identifier_get(video_driver_class_t *driver_class EINA_UNUSED)
{
return "emotion";
}
static char *
_emotion_class_description_get(video_driver_class_t *driver_class EINA_UNUSED)
{
return "Emotion xine video output plugin";
}
/***************************************************************************/
static vo_driver_t *
_emotion_open(video_driver_class_t *driver_class, const void *visual)
{
Emotion_Class *cl;
Emotion_Driver *dv;
cl = (Emotion_Class *)driver_class;
/* visual here is the data ptr passed to xine_open_video_driver() */
// DBG("");
dv = (Emotion_Driver *)malloc(sizeof(Emotion_Driver));
if (!dv) return NULL;
dv->config = cl->config;
dv->xine = cl->xine;
dv->ratio = XINE_VO_ASPECT_AUTO;
dv->vo_driver.get_capabilities = _emotion_capabilities_get;
dv->vo_driver.alloc_frame = _emotion_frame_alloc;
dv->vo_driver.update_frame_format = _emotion_frame_format_update;
dv->vo_driver.overlay_begin = _emotion_overlay_begin;
dv->vo_driver.overlay_blend = _emotion_overlay_blend;
dv->vo_driver.overlay_end = _emotion_overlay_end;
dv->vo_driver.display_frame = _emotion_frame_display;
dv->vo_driver.get_property = _emotion_property_get;
dv->vo_driver.set_property = _emotion_property_set;
dv->vo_driver.get_property_min_max = _emotion_property_min_max_get;
dv->vo_driver.gui_data_exchange = _emotion_gui_data_exchange;
dv->vo_driver.dispose = _emotion_dispose;
dv->vo_driver.redraw_needed = _emotion_redraw;
dv->ev = (Emotion_Xine_Video *)visual;
dv->ev->have_vo = 1;
DBG("vo_driver = %p", &dv->vo_driver);
return &dv->vo_driver;
}
static void
_emotion_dispose(vo_driver_t *vo_driver)
{
Emotion_Driver *dv;
dv = (Emotion_Driver *)vo_driver;
dv->ev->have_vo = 0;
DBG("vo_driver = %p", dv);
free(dv);
}
/***************************************************************************/
static int
_emotion_redraw(vo_driver_t *vo_driver EINA_UNUSED)
{
// DBG("");
return 0;
}
/***************************************************************************/
static uint32_t
_emotion_capabilities_get(vo_driver_t *vo_driver EINA_UNUSED)
{
// DBG("");
return VO_CAP_YV12 | VO_CAP_YUY2;
}
/***************************************************************************/
static int
_emotion_gui_data_exchange(vo_driver_t *vo_driver EINA_UNUSED, int data_type, void *data EINA_UNUSED)
{
// DBG("");
switch (data_type)
{
case XINE_GUI_SEND_COMPLETION_EVENT:
break;
case XINE_GUI_SEND_DRAWABLE_CHANGED:
break;
case XINE_GUI_SEND_EXPOSE_EVENT:
break;
case XINE_GUI_SEND_TRANSLATE_GUI_TO_VIDEO:
break;
case XINE_GUI_SEND_VIDEOWIN_VISIBLE:
break;
case XINE_GUI_SEND_SELECT_VISUAL:
break;
default:
break;
}
return 0;
}
/***************************************************************************/
static int
_emotion_property_set(vo_driver_t *vo_driver, int property, int value)
{
Emotion_Driver *dv;
dv = (Emotion_Driver *)vo_driver;
// DBG("");
switch (property)
{
case VO_PROP_ASPECT_RATIO:
if (value >= XINE_VO_ASPECT_NUM_RATIOS)
value = XINE_VO_ASPECT_AUTO;
// DBG("DRIVER RATIO SET %i!", value);
dv->ratio = value;
break;
default:
break;
}
return value;
}
static int
_emotion_property_get(vo_driver_t *vo_driver, int property)
{
Emotion_Driver *dv;
dv = (Emotion_Driver *)vo_driver;
// DBG("");
switch (property)
{
case VO_PROP_ASPECT_RATIO:
return dv->ratio;
break;
default:
break;
}
return 0;
}
static void
_emotion_property_min_max_get(vo_driver_t *vo_driver EINA_UNUSED, int property EINA_UNUSED, int *min, int *max)
{
// DBG("");
*min = 0;
*max = 0;
}
/***************************************************************************/
static vo_frame_t *
_emotion_frame_alloc(vo_driver_t *vo_driver EINA_UNUSED)
{
Emotion_Frame *fr;
// DBG("");
fr = (Emotion_Frame *)calloc(1, sizeof(Emotion_Frame));
if (!fr) return NULL;
fr->vo_frame.base[0] = NULL;
fr->vo_frame.base[1] = NULL;
fr->vo_frame.base[2] = NULL;
fr->vo_frame.proc_slice = NULL;
fr->vo_frame.proc_frame = NULL;
fr->vo_frame.field = _emotion_frame_field;
fr->vo_frame.dispose = _emotion_frame_dispose;
fr->vo_frame.driver = vo_driver;
return (vo_frame_t *)fr;
}
static void
_emotion_frame_dispose(vo_frame_t *vo_frame)
{
Emotion_Frame *fr;
fr = (Emotion_Frame *)vo_frame;
// DBG("");
_emotion_frame_data_free(fr);
free(fr);
}
static void
_emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags EINA_UNUSED)
{
Emotion_Driver *dv;
Emotion_Frame *fr;
dv = (Emotion_Driver *)vo_driver;
fr = (Emotion_Frame *)vo_frame;
if ((fr->width != (int)width) || (fr->height != (int)height) ||
(fr->format != format) || (!fr->vo_frame.base[0]))
{
// DBG("");
_emotion_frame_data_free(fr);
fr->width = width;
fr->height = height;
fr->format = format;
switch (format)
{
case XINE_IMGFMT_YV12:
{
int y_size, uv_size;
fr->frame.format = EMOTION_FORMAT_YV12;
fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);
y_size = fr->vo_frame.pitches[0] * height;
uv_size = fr->vo_frame.pitches[1] * ((height + 1) / 2);
fr->vo_frame.base[0] = malloc(y_size + (2 * uv_size));
fr->vo_frame.base[1] = fr->vo_frame.base[0] + y_size + uv_size;
fr->vo_frame.base[2] = fr->vo_frame.base[0] + y_size;
fr->frame.w = fr->width;
fr->frame.h = fr->height;
fr->frame.ratio = fr->vo_frame.ratio;
fr->frame.y = fr->vo_frame.base[0];
fr->frame.u = fr->vo_frame.base[1];
fr->frame.v = fr->vo_frame.base[2];
fr->frame.bgra_data = NULL;
fr->frame.y_stride = fr->vo_frame.pitches[0];
fr->frame.u_stride = fr->vo_frame.pitches[1];
fr->frame.v_stride = fr->vo_frame.pitches[2];
fr->frame.obj = dv->ev->obj;
}
break;
case XINE_IMGFMT_YUY2:
{
fr->frame.format = EMOTION_FORMAT_BGRA;
fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4);
fr->vo_frame.pitches[1] = 0;
fr->vo_frame.pitches[2] = 0;
fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
fr->vo_frame.base[1] = NULL;
fr->vo_frame.base[2] = NULL;
fr->frame.w = fr->width;
fr->frame.h = fr->height;
fr->frame.ratio = fr->vo_frame.ratio;
fr->frame.y = NULL;
fr->frame.u = NULL;
fr->frame.v = NULL;
fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
fr->frame.y_stride = 0;
fr->frame.u_stride = 0;
fr->frame.v_stride = 0;
fr->frame.obj = dv->ev->obj;
}
break;
default:
break;
}
if (((format == XINE_IMGFMT_YV12)
&& ((!fr->vo_frame.base[0])
|| (!fr->vo_frame.base[1])
|| (!fr->vo_frame.base[2])))
|| ((format == XINE_IMGFMT_YUY2)
&& ((!fr->vo_frame.base[0])
|| (!fr->frame.bgra_data))))
{
_emotion_frame_data_free(fr);
}
}
fr->frame.ratio = fr->vo_frame.ratio;
fr->ratio = ratio;
}
static void
_emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
{
Emotion_Driver *dv;
Emotion_Frame *fr;
dv = (Emotion_Driver *)vo_driver;
fr = (Emotion_Frame *)vo_frame;
// DBG("fq %i %p", dv->ev->fq, dv->ev);
// if my frame queue is too deep ( > 4 frames) simply block and wait for them
// to drain
// while (dv->ev->fq > 4) usleep(1);
if (dv->ev)
{
void *buf;
if (dv->ev->closing) return;
if (fr->format == XINE_IMGFMT_YUY2)
{
_emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
}
buf = &(fr->frame);
fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
fr->frame.done_func = (done_func_type)_emotion_frame_data_unlock;
fr->frame.done_data = fr;
// DBG("FRAME FOR %p", dv->ev);
if (write(dv->ev->fd_write, &buf, sizeof(void *)) < 0) perror("write");
// DBG("-- FRAME DEC %p == %i", fr->frame.obj, ret);
fr->in_use = 1;
dv->ev->fq++;
}
/* hmm - must find a way to sanely copy data out... FIXME problem */
// fr->vo_frame.free(&fr->vo_frame);
}
static void
_emotion_frame_field(vo_frame_t *vo_frame EINA_UNUSED, int which_field EINA_UNUSED)
{
// DBG("");
}
/***************************************************************************/
static void
_emotion_frame_data_free(Emotion_Frame *fr)
{
if (fr->vo_frame.base[0])
{
free(fr->vo_frame.base[0]);
fr->vo_frame.base[0] = NULL;
fr->vo_frame.base[1] = NULL;
fr->vo_frame.base[2] = NULL;
fr->frame.y = fr->vo_frame.base[0];
fr->frame.u = fr->vo_frame.base[1];
fr->frame.v = fr->vo_frame.base[2];
}
if (fr->frame.bgra_data)
{
free(fr->frame.bgra_data);
fr->frame.bgra_data = NULL;
}
}
static void
_emotion_frame_data_unlock(Emotion_Frame *fr)
{
// DBG("");
if (fr->in_use)
{
fr->vo_frame.free(&fr->vo_frame);
fr->in_use = 0;
}
}
/***************************************************************************/
static void
_emotion_overlay_begin(vo_driver_t *vo_driver EINA_UNUSED, vo_frame_t *vo_frame EINA_UNUSED, int changed EINA_UNUSED)
{
// DBG("");
}
static void
_emotion_overlay_end(vo_driver_t *vo_driver EINA_UNUSED, vo_frame_t *vo_frame EINA_UNUSED)
{
// DBG("");
}
static void
_emotion_overlay_blend(vo_driver_t *vo_driver EINA_UNUSED, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay EINA_UNUSED)
{
Emotion_Frame *fr;
fr = (Emotion_Frame *)vo_frame;
// DBG("");
_emotion_overlay_blend_yuv(fr->vo_frame.base, vo_overlay,
fr->width, fr->height,
fr->vo_frame.pitches);
}
static void _emotion_overlay_mem_blend_8(uint8_t *mem, uint8_t val, uint8_t o, size_t sz)
{
uint8_t *limit = mem + sz;
while (mem < limit)
{
*mem = BLEND_BYTE(*mem, val, o);
mem++;
}
}
static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3])
{
Emotion_Lut *my_clut;
uint8_t *my_trans;
int src_width;
int src_height;
rle_elem_t *rle;
rle_elem_t *rle_limit;
int x_off;
int y_off;
int ymask, xmask;
int rle_this_bite;
int rle_remainder;
int rlelen;
int x, y;
int hili_right;
uint8_t clr = 0;
src_width = img_overl->width;
src_height = img_overl->height;
rle = img_overl->rle;
rle_limit = rle + img_overl->num_rle;
x_off = img_overl->x;
y_off = img_overl->y;
if (!rle) return;
uint8_t *dst_y = dst_base[0] + dst_pitches[0] * y_off + x_off;
uint8_t *dst_cr = dst_base[2] + (y_off / 2) * dst_pitches[1] + (x_off / 2) + 1;
uint8_t *dst_cb = dst_base[1] + (y_off / 2) * dst_pitches[2] + (x_off / 2) + 1;
my_clut = (Emotion_Lut *) img_overl->hili_color;
my_trans = img_overl->hili_trans;
/* avoid wrapping the overlay if drawing to a small image */
if( (x_off + img_overl->hili_right) < dst_width )
hili_right = img_overl->hili_right;
else
hili_right = dst_width - 1 - x_off;
/* avoid buffer overflow */
if( (src_height + y_off) >= dst_height )
src_height = dst_height - 1 - y_off;
rlelen=rle_remainder=0;
for (y = 0; y < src_height; y++)
{
ymask = ((img_overl->hili_top > y) || (img_overl->hili_bottom < y));
xmask = 0;
for (x = 0; x < src_width;)
{
uint16_t o;
if (rlelen == 0)
{
rle_remainder = rlelen = rle->len;
clr = rle->color;
rle++;
}
if (rle_remainder == 0)
{
rle_remainder = rlelen;
}
if ((rle_remainder + x) > src_width)
{
/* Do something for long rlelengths */
rle_remainder = src_width - x;
}
if (ymask == 0)
{
if (x <= img_overl->hili_left)
{
/* Starts outside clip area */
if ((x + rle_remainder - 1) > img_overl->hili_left )
{
/* Cutting needed, starts outside, ends inside */
rle_this_bite = (img_overl->hili_left - x + 1);
rle_remainder -= rle_this_bite;
rlelen -= rle_this_bite;
my_clut = (Emotion_Lut *) img_overl->color;
my_trans = img_overl->trans;
xmask = 0;
}
else
{
/* no cutting needed, starts outside, ends outside */
rle_this_bite = rle_remainder;
rle_remainder = 0;
rlelen -= rle_this_bite;
my_clut = (Emotion_Lut *) img_overl->color;
my_trans = img_overl->trans;
xmask = 0;
}
}
else if (x < hili_right)
{
/* Starts inside clip area */
if ((x + rle_remainder) > hili_right )
{
/* Cutting needed, starts inside, ends outside */
rle_this_bite = (hili_right - x);
rle_remainder -= rle_this_bite;
rlelen -= rle_this_bite;
my_clut = (Emotion_Lut *) img_overl->hili_color;
my_trans = img_overl->hili_trans;
xmask++;
}
else
{
/* no cutting needed, starts inside, ends inside */
rle_this_bite = rle_remainder;
rle_remainder = 0;
rlelen -= rle_this_bite;
my_clut = (Emotion_Lut *) img_overl->hili_color;
my_trans = img_overl->hili_trans;
xmask++;
}
}
else if (x >= hili_right)
{
/* Starts outside clip area, ends outside clip area */
if ((x + rle_remainder ) > src_width )
{
/* Cutting needed, starts outside, ends at right edge */
/* It should never reach here due to the earlier test of src_width */
rle_this_bite = (src_width - x );
rle_remainder -= rle_this_bite;
rlelen -= rle_this_bite;
my_clut = (Emotion_Lut *) img_overl->color;
my_trans = img_overl->trans;
xmask = 0;
}
else
{
/* no cutting needed, starts outside, ends outside */
rle_this_bite = rle_remainder;
rle_remainder = 0;
rlelen -= rle_this_bite;
my_clut = (Emotion_Lut *) img_overl->color;
my_trans = img_overl->trans;
xmask = 0;
}
}
}
else
{
/* Outside clip area due to y */
/* no cutting needed, starts outside, ends outside */
rle_this_bite = rle_remainder;
rle_remainder = 0;
rlelen -= rle_this_bite;
my_clut = (Emotion_Lut *) img_overl->color;
my_trans = img_overl->trans;
xmask = 0;
}
o = my_trans[clr];
if (o)
{
if (o >= 15)
{
memset(dst_y + x, my_clut[clr].y, rle_this_bite);
if (y & 1)
{
memset(dst_cr + (x >> 1), my_clut[clr].cr, (rle_this_bite+1) >> 1);
memset(dst_cb + (x >> 1), my_clut[clr].cb, (rle_this_bite+1) >> 1);
}
}
else
{
_emotion_overlay_mem_blend_8(dst_y + x, my_clut[clr].y, o, rle_this_bite);
if (y & 1)
{
/* Blending cr and cb should use a different function, with pre -128 to each sample */
_emotion_overlay_mem_blend_8(dst_cr + (x >> 1), my_clut[clr].cr, o, (rle_this_bite+1) >> 1);
_emotion_overlay_mem_blend_8(dst_cb + (x >> 1), my_clut[clr].cb, o, (rle_this_bite+1) >> 1);
}
}
}
x += rle_this_bite;
if (rle >= rle_limit)
{
break;
}
}
if (rle >= rle_limit)
{
break;
}
dst_y += dst_pitches[0];
if (y & 1)
{
dst_cr += dst_pitches[2];
dst_cb += dst_pitches[1];
}
}
}
//TODO: Really need to improve this converter!
#define LIMIT(x) ((x) > 0xff ? 0xff : ((x) < 0 ? 0 : (x)))
static void
_emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
{
int i, j;
unsigned char *y, *u, *v;
y = src;
u = src + 1;
v = src + 3;
for (i = 0; i < width; i++)
{
for (j = 0; j < height; j++)
{
*dst++ = LIMIT(1.164 * (*y - 16) + 2.018 * (*u - 128));
*dst++ = LIMIT(1.164 * (*y - 16) - 0.813 * (*v - 128) - 0.391 * (*u - 128));
*dst++ = LIMIT(1.164 * (*y - 16) + 1.596 * (*v - 128));
*dst++ = 0;
y += 2;
if (j % 2 == 1)
{
u += 4;
v += 4;
}
}
}
}

View File

@@ -1,16 +0,0 @@
generic_src = files([
'emotion_xine.h',
'emotion_xine.c',
'emotion_xine_vo_out.c',
])
generic_deps = dependency('libxine')
shared_module(emotion_loader,
generic_src,
include_directories : config_dir,
dependencies: [eina, evas, emotion, generic_deps],
install: true,
install_dir : mod_install_dir,
c_args : package_c_args,
)