2009-07-31 10:06:11 -07:00
|
|
|
#ifdef HAVE_CONFIG_H
|
|
|
|
# include <config.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef HAVE_EVIL
|
|
|
|
# include <Evil.h>
|
|
|
|
#endif
|
|
|
|
|
2009-11-11 15:43:58 -08:00
|
|
|
#ifdef EFL_HAVE_PTHREAD
|
2009-08-19 23:09:15 -07:00
|
|
|
# include <pthread.h>
|
|
|
|
#endif
|
2009-07-31 10:06:11 -07:00
|
|
|
|
|
|
|
#include "Ecore.h"
|
2009-12-22 13:15:12 -08:00
|
|
|
#include "ecore_private.h"
|
2009-07-31 10:06:11 -07:00
|
|
|
|
|
|
|
typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
|
|
|
|
typedef struct _Ecore_Pthread Ecore_Pthread;
|
|
|
|
|
|
|
|
/* One queued or running job. Which arm of the union is valid is decided by
 * the `long_run` flag at the bottom of the struct. */
struct _Ecore_Pthread_Worker
{
   union {
      struct {
         /* Blocking computation run in a worker thread. */
         void (*func_heavy)(void *data);
      } short_run;
      struct {
         /* Blocking computation; receives its own Ecore_Thread handle so it
          * can report progress through ecore_thread_notify(). */
         void (*func_heavy)(Ecore_Thread *thread, void *data);
         /* Main-loop callback invoked for each message sent with
          * ecore_thread_notify() (may be NULL, see _ecore_notify_handler). */
         void (*func_notify)(Ecore_Thread *thread, void *msg_data, void *data);

         /* Pipe carrying notify messages from the worker thread back to
          * the main loop. */
         Ecore_Pipe *notify;

#ifdef EFL_HAVE_PTHREAD
         /* Identity of the thread executing this job; checked by
          * ecore_thread_notify() to reject calls from other threads. */
         pthread_t self;
#endif
      } long_run;
   } u;

   /* Main-loop callback when the job was cancelled (may be NULL). */
   void (*func_cancel)(void *data);
   /* Main-loop callback when the job finished normally (may be NULL). */
   void (*func_end)(void *data);

   /* Opaque user data handed to all the callbacks above. */
   const void *data;

   Eina_Bool cancel : 1;    /* set by ecore_thread_cancel() on a running job */
   Eina_Bool long_run : 1;  /* EINA_TRUE when the long_run arm of `u` is valid */
};
|
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
#ifdef EFL_HAVE_PTHREAD
|
|
|
|
typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;
|
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
struct _Ecore_Pthread_Data
|
|
|
|
{
|
|
|
|
Ecore_Pipe *p;
|
2010-06-30 06:25:28 -07:00
|
|
|
void *data;
|
2009-11-06 14:15:04 -08:00
|
|
|
pthread_t thread;
|
2009-07-31 10:06:11 -07:00
|
|
|
};
|
2009-11-08 14:16:17 -08:00
|
|
|
#endif
|
2009-07-31 10:06:11 -07:00
|
|
|
|
|
|
|
/* Upper bound on simultaneous worker threads (CPU count, floor of 1). */
static int _ecore_thread_count_max = 0;
/* Ecore event type used to defer Ecore_Pipe destruction out of the pipe's
 * own dispatch loop (see _ecore_thread_pipe_del()). */
static int ECORE_THREAD_PIPE_DEL = 0;

#ifdef EFL_HAVE_PTHREAD
/* Number of pooled worker threads currently alive; guarded by _mutex. */
static int _ecore_thread_count = 0;
/* List of Ecore_Pthread_Data for running workers; touched from the main
 * loop only. */
static Eina_List *_ecore_thread = NULL;
/* Queue of pending short jobs (Ecore_Pthread_Worker*); guarded by _mutex. */
static Eina_List *_ecore_thread_data = NULL;
/* Queue of pending long-running jobs (Ecore_Pthread_Worker*); guarded by
 * _mutex. */
static Eina_List *_ecore_long_thread_data = NULL;
/* Handler registered for ECORE_THREAD_PIPE_DEL events. */
static Ecore_Event_Handler *del_handler = NULL;
/* Protects the two job queues and _ecore_thread_count. */
static pthread_mutex_t _mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
|
|
|
|
|
|
static void
|
|
|
|
_ecore_thread_pipe_free(void *data __UNUSED__, void *event)
|
|
|
|
{
|
|
|
|
Ecore_Pipe *p = event;
|
|
|
|
|
|
|
|
ecore_pipe_del(p);
|
|
|
|
}
|
|
|
|
|
2010-06-24 09:15:56 -07:00
|
|
|
/* ECORE_THREAD_PIPE_DEL event handler. The pipe itself is destroyed by the
 * event's free callback (_ecore_thread_pipe_free); this handler only exists
 * so destruction happens from the event queue rather than from inside the
 * pipe's own dispatch. */
static Eina_Bool
_ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
{
   /* This is a hack to delay pipe destruction until we are out of its internal loop. */
   return ECORE_CALLBACK_CANCEL;
}
|
|
|
|
|
|
|
|
static void
|
2009-11-06 14:15:04 -08:00
|
|
|
_ecore_thread_end(Ecore_Pthread_Data *pth)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
|
|
|
Ecore_Pipe *p;
|
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
if (pthread_join(pth->thread, (void**) &p) != 0)
|
2009-07-31 10:06:11 -07:00
|
|
|
return ;
|
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
_ecore_thread = eina_list_remove(_ecore_thread, pth);
|
|
|
|
|
|
|
|
ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
static void
|
|
|
|
_ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *work;
|
2009-11-06 14:15:04 -08:00
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
if (nbyte != sizeof (Ecore_Pthread_Worker*)) return ;
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
work = *(Ecore_Pthread_Worker**)buffer;
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
if (work->cancel)
|
|
|
|
{
|
|
|
|
if (work->func_cancel)
|
|
|
|
work->func_cancel((void*) work->data);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (work->func_end)
|
|
|
|
work->func_end((void*) work->data);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (work->long_run) ecore_pipe_del(work->u.long_run.notify);
|
|
|
|
free(work);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2010-07-02 04:15:20 -07:00
|
|
|
_ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
|
2010-06-30 06:25:28 -07:00
|
|
|
{
|
2010-07-02 04:15:20 -07:00
|
|
|
Ecore_Pthread_Worker *work = data;
|
|
|
|
void *user_data;
|
2010-06-30 06:25:28 -07:00
|
|
|
|
|
|
|
if (nbyte != sizeof (Ecore_Pthread_Worker*)) return ;
|
|
|
|
|
2010-07-02 04:15:20 -07:00
|
|
|
user_data = *(void**)buffer;
|
2010-06-30 06:25:28 -07:00
|
|
|
|
|
|
|
if (work->u.long_run.func_notify)
|
2010-07-02 04:15:20 -07:00
|
|
|
work->u.long_run.func_notify((Ecore_Thread *) work, user_data, (void*) work->data);
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_ecore_short_job(Ecore_Pipe *end_pipe)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *work;
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
while (_ecore_thread_data)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
|
|
|
pthread_mutex_lock(&_mutex);
|
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
if (!_ecore_thread_data)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
|
|
|
pthread_mutex_unlock(&_mutex);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
work = eina_list_data_get(_ecore_thread_data);
|
|
|
|
_ecore_thread_data = eina_list_remove_list(_ecore_thread_data, _ecore_thread_data);
|
2009-07-31 10:06:11 -07:00
|
|
|
|
|
|
|
pthread_mutex_unlock(&_mutex);
|
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
work->u.short_run.func_heavy((void*) work->data);
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker*));
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
static void
|
|
|
|
_ecore_long_job(Ecore_Pipe *end_pipe, pthread_t thread)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *work;
|
|
|
|
|
|
|
|
while (_ecore_long_thread_data)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
2010-06-30 06:25:28 -07:00
|
|
|
pthread_mutex_lock(&_mutex);
|
|
|
|
|
|
|
|
if (!_ecore_long_thread_data)
|
|
|
|
{
|
|
|
|
pthread_mutex_unlock(&_mutex);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
work = eina_list_data_get(_ecore_long_thread_data);
|
|
|
|
_ecore_long_thread_data = eina_list_remove_list(_ecore_long_thread_data, _ecore_long_thread_data);
|
|
|
|
|
2009-07-31 10:06:11 -07:00
|
|
|
pthread_mutex_unlock(&_mutex);
|
2010-06-30 06:25:28 -07:00
|
|
|
|
|
|
|
work->u.long_run.self = thread;
|
|
|
|
work->u.long_run.func_heavy((Ecore_Thread *) work, (void*) work->data);
|
|
|
|
|
|
|
|
ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker*));
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
/* Thread entry point for a dedicated (non-pooled) thread running exactly one
 * long job (ecore_long_run() with try_no_queue set). It creates its own
 * result pipe, runs the job, then fabricates a second, synthetic worker whose
 * func_end is _ecore_thread_end so the main loop joins this thread and
 * reclaims the pipe. Returns the pipe (picked up via pthread_join) or NULL
 * on allocation failure. */
static void *
_ecore_direct_worker(Ecore_Pthread_Worker *work)
{
   Ecore_Pthread_Data *pth;

   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) return NULL;

   pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
   if (!pth->p)
     {
        free(pth);
        return NULL;
     }
   pth->thread = pthread_self();

   /* Run the actual job; the handle given to the user is `work` itself. */
   work->u.long_run.self = pth->thread;
   work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);

   /* Report the job's completion; the main loop frees `work`, so it must
    * not be touched after this write. */
   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker*));

   /* Reuse the variable for a synthetic short-run worker that asks the main
    * loop to join this thread and dispose of the pipe. */
   work = malloc(sizeof (Ecore_Pthread_Worker));
   if (!work)
     {
        ecore_pipe_del(pth->p);
        free(pth);
        return NULL;
     }

   work->data = pth;
   work->u.short_run.func_heavy = NULL;
   work->func_end = (void*) _ecore_thread_end;
   work->func_cancel = NULL;
   work->cancel = EINA_FALSE;
   work->long_run = EINA_FALSE;

   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker*));

   /* Returned value is collected by pthread_join() in _ecore_thread_end(). */
   return pth->p;
}
|
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
/* Thread entry point for a pooled worker. Registers itself in
 * _ecore_thread_count, drains both job queues until they are empty (the
 * emptiness decision is made under _mutex), then deregisters and asks the
 * main loop — via a synthetic worker whose func_end is _ecore_thread_end —
 * to join it and reclaim its pipe. */
static void *
_ecore_thread_worker(Ecore_Pthread_Data *pth)
{
   Ecore_Pthread_Worker *work;

   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

   pthread_mutex_lock(&_mutex);
   _ecore_thread_count++;
   pthread_mutex_unlock(&_mutex);

 on_error:
   /* Unlocked peeks are fine here: both job functions re-check the queue
    * under _mutex before popping anything. */
   if (_ecore_thread_data) _ecore_short_job(pth->p);
   if (_ecore_long_thread_data) _ecore_long_job(pth->p, pth->thread);

   /* FIXME: Check if there is long running task todo, and switch to long run handler. */

   /* Decide atomically whether to exit: if either queue refilled while we
    * were draining the other, loop back instead of shutting down. */
   pthread_mutex_lock(&_mutex);
   if (_ecore_thread_data)
     {
        pthread_mutex_unlock(&_mutex);
        goto on_error;
     }
   if (_ecore_long_thread_data)
     {
        pthread_mutex_unlock(&_mutex);
        goto on_error;
     }

   /* Both queues empty while holding the lock: leave the pool. Producers
    * also hold _mutex when comparing the count, so they will see the
    * decrement and spawn a replacement if needed. */
   _ecore_thread_count--;

   pthread_mutex_unlock(&_mutex);

   /* Synthetic worker asking the main loop to join this thread and dispose
    * of its pipe (see _ecore_thread_end). */
   work = malloc(sizeof (Ecore_Pthread_Worker));
   if (!work) return NULL;

   work->data = pth;
   work->u.short_run.func_heavy = NULL;
   work->func_end = (void*) _ecore_thread_end;
   work->func_cancel = NULL;
   work->cancel = EINA_FALSE;
   work->long_run = EINA_FALSE;

   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker*));

   /* Returned value is collected by pthread_join() in _ecore_thread_end(). */
   return pth->p;
}
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2009-07-31 10:06:11 -07:00
|
|
|
#endif
|
|
|
|
|
2009-10-09 20:24:56 -07:00
|
|
|
void
|
2009-09-03 22:49:54 -07:00
|
|
|
_ecore_thread_init(void)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
|
|
|
_ecore_thread_count_max = eina_cpu_count();
|
2009-08-03 01:19:33 -07:00
|
|
|
if (_ecore_thread_count_max <= 0)
|
2009-07-31 10:06:11 -07:00
|
|
|
_ecore_thread_count_max = 1;
|
|
|
|
|
|
|
|
ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
|
2009-11-11 15:43:58 -08:00
|
|
|
#ifdef EFL_HAVE_PTHREAD
|
2009-07-31 10:06:11 -07:00
|
|
|
del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
|
2009-08-12 17:30:59 -07:00
|
|
|
#endif
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
|
|
|
|
2009-10-09 20:24:56 -07:00
|
|
|
/* Module shutdown, called from ecore_shutdown(): cancel everything still
 * pending or running and tear down the event plumbing. */
void
_ecore_thread_shutdown(void)
{
   /* FIXME: If functions are still running in the background, should we kill them? */
#ifdef EFL_HAVE_PTHREAD
   Ecore_Pthread_Worker *work;
   Ecore_Pthread_Data *pth;

   pthread_mutex_lock(&_mutex);

   /* Flush the short-job queue: notify each owner and free the descriptor.
    * NOTE(review): _ecore_long_thread_data is not flushed here — confirm
    * whether queued long jobs leak on shutdown. */
   EINA_LIST_FREE(_ecore_thread_data, work)
     {
        if (work->func_cancel)
          work->func_cancel((void*)work->data);
        free(work);
     }

   pthread_mutex_unlock(&_mutex);

   /* Kill the workers. Cancellation is asynchronous (see the
    * pthread_setcanceltype calls in the worker entry points), so threads
    * stop without reaching their normal exit path. */
   EINA_LIST_FREE(_ecore_thread, pth)
     {
        Ecore_Pipe *p;

        pthread_cancel(pth->thread);
        pthread_join(pth->thread, (void **) &p);

        /* NOTE(review): pth itself is not freed here — appears to leak;
         * confirm against _ecore_thread_end's ownership. */
        ecore_pipe_del(pth->p);
     }

   ecore_event_handler_del(del_handler);
   del_handler = NULL;
#endif
}
|
|
|
|
|
|
|
|
/*
 * ecore_thread_run provides a facility for easily managing a heavy task in a
 * parallel thread. You should provide two functions: the first one,
 * func_heavy, will do the heavy work in another thread (so you should not
 * use the EFL in it, except Eina if you are careful), and the second one,
 * func_end, will be called in the Ecore main loop when func_heavy is done.
 * So you can use all the EFL inside this function.
 *
 * Be aware that you can't make assumptions about the completion order of
 * func_end after many calls to ecore_thread_run, as we start as many
 * threads as the host CPU can handle.
 */
|
2009-11-06 14:15:04 -08:00
|
|
|
/* Queue func_heavy to run in a pooled worker thread; func_end (or
 * func_cancel) runs later in the main loop. Returns an opaque handle, or
 * NULL when the job could already be reported as failed/finished. Without
 * pthread support the calls happen synchronously. */
EAPI Ecore_Thread *
ecore_thread_run(void (*func_heavy)(void *data),
		 void (*func_end)(void *data),
		 void (*func_cancel)(void *data),
		 const void *data)
{
#ifdef EFL_HAVE_PTHREAD
   Ecore_Pthread_Worker *work;
   Ecore_Pthread_Data *pth = NULL;

   if (!func_heavy) return NULL;

   work = malloc(sizeof (Ecore_Pthread_Worker));
   if (!work)
     {
        /* BUG FIX: func_cancel is optional (every other call site guards
         * it); calling it unconditionally crashed callers passing NULL. */
        if (func_cancel) func_cancel((void*) data);
        return NULL;
     }

   work->u.short_run.func_heavy = func_heavy;
   work->func_end = func_end;
   work->func_cancel = func_cancel;
   work->cancel = EINA_FALSE;
   work->long_run = EINA_FALSE;
   work->data = data;

   pthread_mutex_lock(&_mutex);
   _ecore_thread_data = eina_list_append(_ecore_thread_data, work);

   /* Pool already at capacity: an existing worker will pick the job up. */
   if (_ecore_thread_count == _ecore_thread_count_max)
     {
        pthread_mutex_unlock(&_mutex);
        return (Ecore_Thread*) work;
     }

   pthread_mutex_unlock(&_mutex);

   /* One more thread could be created. */
   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) goto on_error;

   pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
   if (!pth->p) goto on_error;

   if (pthread_create(&pth->thread, NULL, (void*) _ecore_thread_worker, pth) == 0)
     return (Ecore_Thread*) work;

 on_error:
   if (pth)
     {
        if (pth->p) ecore_pipe_del(pth->p);
        free(pth);
     }

   /* No worker exists to ever run the job, so cancel it now. */
   if (_ecore_thread_count == 0)
     {
        /* BUG FIX: the job was freed while still linked into
         * _ecore_thread_data, leaving a dangling pointer for the next
         * worker to pop. Unlink it under the lock before destroying it. */
        pthread_mutex_lock(&_mutex);
        _ecore_thread_data = eina_list_remove(_ecore_thread_data, work);
        pthread_mutex_unlock(&_mutex);

        if (work->func_cancel)
          work->func_cancel((void*) work->data);
        free(work);
        work = NULL;
     }
   return (Ecore_Thread*) work;
#else
   /*
     If no thread and as we don't want to break app that rely on this
     facility, we will lock the interface until we are done.
    */
   func_heavy((void *)data);
   func_end((void *)data);

   return NULL;
#endif
}
|
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
/*
 * ecore_thread_cancel gives the possibility to cancel a task still pending
 * or running. It will return EINA_FALSE if the destruction is delayed, or
 * EINA_TRUE if the task is cancelled by this call.
 *
 * You should use this function only in the main loop.
 *
 * func_end/func_cancel will destroy the handler, so don't use it afterwards.
 * And if ecore_thread_cancel returns EINA_TRUE, you should not use the
 * Ecore_Thread handle either.
 */
|
|
|
|
EAPI Eina_Bool
|
|
|
|
ecore_thread_cancel(Ecore_Thread *thread)
|
|
|
|
{
|
2009-11-11 15:43:58 -08:00
|
|
|
#ifdef EFL_HAVE_PTHREAD
|
2009-11-06 14:15:04 -08:00
|
|
|
Ecore_Pthread_Worker *work;
|
|
|
|
Eina_List *l;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&_mutex);
|
|
|
|
|
|
|
|
EINA_LIST_FOREACH(_ecore_thread_data, l, work)
|
|
|
|
if ((void*) work == (void*) thread)
|
|
|
|
{
|
|
|
|
_ecore_thread_data = eina_list_remove_list(_ecore_thread_data, l);
|
|
|
|
|
2010-04-12 14:51:35 -07:00
|
|
|
pthread_mutex_unlock(&_mutex);
|
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
if (work->func_cancel)
|
|
|
|
work->func_cancel((void*) work->data);
|
|
|
|
free(work);
|
|
|
|
|
|
|
|
return EINA_TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&_mutex);
|
|
|
|
|
|
|
|
/* Delay the destruction */
|
2010-04-08 21:52:04 -07:00
|
|
|
((Ecore_Pthread_Worker*)thread)->cancel = EINA_TRUE;
|
2009-11-06 14:15:04 -08:00
|
|
|
return EINA_FALSE;
|
2009-11-08 14:16:17 -08:00
|
|
|
#else
|
2010-02-25 07:26:38 -08:00
|
|
|
return EINA_TRUE;
|
2009-11-08 14:16:17 -08:00
|
|
|
#endif
|
2009-11-06 14:15:04 -08:00
|
|
|
}
|
2010-06-30 06:25:28 -07:00
|
|
|
|
|
|
|
EAPI Eina_Bool
|
|
|
|
ecore_thread_check(Ecore_Thread *thread)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker*) thread;
|
|
|
|
|
|
|
|
if (!worker) return EINA_FALSE;
|
|
|
|
return worker->cancel;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Queue a long-running job: func_heavy runs in a worker thread and may send
 * progress messages (delivered to func_notify in the main loop) through
 * ecore_thread_notify(). With try_no_queue set, a dedicated thread is
 * attempted first, bypassing the pool. Without pthread support everything
 * runs synchronously. */
EAPI Ecore_Thread *
ecore_long_run(void (*func_heavy)(Ecore_Thread *thread, void *data),
	       void (*func_notify)(Ecore_Thread *thread, void *msg_data, void *data),
	       void (*func_end)(void *data),
	       void (*func_cancel)(void *data),
	       const void *data,
	       Eina_Bool try_no_queue)
{
#ifdef EFL_HAVE_PTHREAD
   Ecore_Pthread_Worker *worker;
   Ecore_Pthread_Data *pth = NULL;

   if (!func_heavy) return NULL;

   worker = malloc(sizeof (Ecore_Pthread_Worker));
   if (!worker) goto on_error;

   worker->u.long_run.func_heavy = func_heavy;
   worker->u.long_run.func_notify = func_notify;
   worker->func_cancel = func_cancel;
   worker->func_end = func_end;
   worker->data = data;
   worker->cancel = EINA_FALSE;
   worker->long_run = EINA_TRUE;

   /* NOTE(review): ecore_pipe_add's result is not checked; a NULL notify
    * pipe would make ecore_thread_notify fail silently — confirm intent. */
   worker->u.long_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);

   /* BUG FIX: the condition was `if (!try_no_queue)`, spawning a dedicated
    * thread exactly when the caller did NOT ask for one (and queueing when
    * it did). */
   if (try_no_queue)
     {
        pthread_t t;

        if (pthread_create(&t, NULL, (void*) _ecore_direct_worker, worker) == 0)
          return (Ecore_Thread*) worker;
     }

   pthread_mutex_lock(&_mutex);
   _ecore_long_thread_data = eina_list_append(_ecore_long_thread_data, worker);

   /* Pool already at capacity: an existing worker will pick the job up. */
   if (_ecore_thread_count == _ecore_thread_count_max)
     {
        pthread_mutex_unlock(&_mutex);
        return (Ecore_Thread*) worker;
     }

   pthread_mutex_unlock(&_mutex);

   /* One more thread could be created. */
   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) goto on_error;

   pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
   /* BUG FIX: this test was inverted (`if (pth->p)`), bailing out exactly
    * when pipe creation succeeded — compare the same sequence in
    * ecore_thread_run(). */
   if (!pth->p) goto on_error;

   if (pthread_create(&pth->thread, NULL, (void*) _ecore_thread_worker, pth) == 0)
     return (Ecore_Thread*) worker;

 on_error:
   if (pth)
     {
        if (pth->p) ecore_pipe_del(pth->p);
        free(pth);
     }

   /* No worker exists to ever run the job, so cancel it now. */
   if (_ecore_thread_count == 0)
     {
        if (func_cancel) func_cancel((void*) data);

        if (worker)
          {
             /* BUG FIX: unlink the job before freeing it so the queue never
              * holds a dangling pointer (removing an element that was never
              * appended is a harmless no-op). */
             pthread_mutex_lock(&_mutex);
             _ecore_long_thread_data = eina_list_remove(_ecore_long_thread_data, worker);
             pthread_mutex_unlock(&_mutex);

             ecore_pipe_del(worker->u.long_run.notify);
             free(worker);
             worker = NULL;
          }
     }

   return (Ecore_Thread*) worker;
#else
   Ecore_Pthread_Worker worker;

   /*
     If no thread and as we don't want to break app that rely on this
     facility, we will lock the interface until we are done.
    */
   worker.u.long_run.func_heavy = func_heavy;
   worker.u.long_run.func_notify = func_notify;
   worker.u.long_run.notify = NULL;
   worker.func_cancel = func_cancel;
   worker.func_end = func_end;
   worker.data = data;
   worker.cancel = EINA_FALSE;
   worker.long_run = EINA_TRUE;

   func_heavy((Ecore_Thread*) &worker, data);

   if (worker.cancel) func_cancel(data);
   else func_end(data);

   return NULL;
#endif
}
|
|
|
|
|
|
|
|
EAPI Eina_Bool
|
|
|
|
ecore_thread_notify(Ecore_Thread *thread, void *data)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker*) thread;
|
|
|
|
|
|
|
|
if (!worker) return EINA_FALSE;
|
|
|
|
if (!worker->long_run) return EINA_FALSE;
|
|
|
|
|
|
|
|
#ifdef EFL_HAVE_PTHREAD
|
|
|
|
if (worker->u.long_run.self != pthread_self()) return EINA_FALSE;
|
|
|
|
|
2010-07-02 04:15:20 -07:00
|
|
|
ecore_pipe_write(worker->u.long_run.notify, &data, sizeof (void*));
|
2010-06-30 06:25:28 -07:00
|
|
|
|
|
|
|
return EINA_TRUE;
|
|
|
|
#else
|
|
|
|
worker->u.long_run.func_notify(thread, data);
|
|
|
|
|
|
|
|
return EINA_TRUE;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|