2009-07-31 10:06:11 -07:00
|
|
|
#ifdef HAVE_CONFIG_H
|
|
|
|
# include <config.h>
|
|
|
|
#endif
|
|
|
|
|
2011-12-06 08:09:05 -08:00
|
|
|
#include <stdlib.h>
|
2010-11-26 10:41:43 -08:00
|
|
|
#include <sys/time.h>
|
2011-07-28 05:01:16 -07:00
|
|
|
#include <assert.h>
|
2011-11-14 14:29:46 -08:00
|
|
|
#include <sys/types.h>
|
2013-01-10 12:25:26 -08:00
|
|
|
#include <unistd.h>
|
2010-11-26 10:41:43 -08:00
|
|
|
|
2009-07-31 10:06:11 -07:00
|
|
|
#include "Ecore.h"
|
2009-12-22 13:15:12 -08:00
|
|
|
#include "ecore_private.h"
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
/* Thread-abstraction shims over Eina's locking/threading API.  These keep
 * the rest of this file terse:
 *   LK*   = mutex, SLK* = spinlock, CD* = condition variable,
 *   LRWK* = rwlock, PH* = thread handle. */
# define LK(x) Eina_Lock x
# define LKI(x) eina_lock_new(&(x))
# define LKD(x) eina_lock_free(&(x))
# define LKL(x) eina_lock_take(&(x))
# define LKU(x) eina_lock_release(&(x))

# define SLK(x) Eina_Spinlock x
# define SLKI(x) eina_spinlock_new(&(x))
# define SLKD(x) eina_spinlock_free(&(x))
# define SLKL(x) eina_spinlock_take(&(x))
# define SLKU(x) eina_spinlock_release(&(x))

# define CD(x) Eina_Condition x
# define CDI(x, m) eina_condition_new(&(x), &(m))
# define CDD(x) eina_condition_free(&(x))
# define CDB(x) eina_condition_broadcast(&(x))
# define CDW(x, t) eina_condition_timedwait(&(x), t)

/* NOTE(review): unlike the groups above, the LRWK* expansions carry a
 * trailing ';', so a call site written "LRWKI(x);" expands to an extra
 * empty statement.  Harmless in plain statement position, but confirm no
 * call site uses these in an unbraced if/else before normalizing. */
# define LRWK(x) Eina_RWLock x
# define LRWKI(x) eina_rwlock_new(&(x));
# define LRWKD(x) eina_rwlock_free(&(x));
# define LRWKWL(x) eina_rwlock_take_write(&(x));
# define LRWKRL(x) eina_rwlock_take_read(&(x));
# define LRWKU(x) eina_rwlock_release(&(x));

# define PH(x) Eina_Thread x
# define PHE(x, y) eina_thread_equal(x, y)
# define PHS() eina_thread_self()
/* Create a background-priority worker thread. */
# define PHC(x, f, d) eina_thread_create(&(x), EINA_THREAD_BACKGROUND, -1, (void *)f, d)
/* Urgent-priority variant of PHC. */
# define PHC2(x, f, d)eina_thread_create(&(x), EINA_THREAD_URGENT, -1, (void *)f, d)
# define PHJ(x) eina_thread_join(x)
|
|
|
|
|
2009-07-31 10:06:11 -07:00
|
|
|
/* Forward typedefs for the worker bookkeeping structures defined below. */
typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
typedef struct _Ecore_Pthread Ecore_Pthread;
typedef struct _Ecore_Thread_Data Ecore_Thread_Data;
typedef struct _Ecore_Thread_Waiter Ecore_Thread_Waiter;
|
2017-03-02 09:32:40 -08:00
|
|
|
|
|
|
|
/* State for a caller blocked waiting on a running thread: the completion
 * callbacks involved and a flag cleared/changed when the wait ends. */
struct _Ecore_Thread_Waiter
{
   Ecore_Thread_Cb func_cancel;
   Ecore_Thread_Cb func_end;
   Eina_Bool waiting;   /* still waiting for the thread to finish */
};
|
2010-07-30 08:56:18 -07:00
|
|
|
|
|
|
|
/* One entry of a thread's local-data hash: the payload plus the destructor
 * run on it when the entry is dropped (see _ecore_thread_data_free). */
struct _Ecore_Thread_Data
{
   void *data;          /* user payload */
   Eina_Free_Cb cb;     /* optional destructor for @data */
};
|
2009-07-31 10:06:11 -07:00
|
|
|
|
|
|
|
/* Internal state of one Ecore thread job.  The public Ecore_Thread handle
 * is this structure cast, so its address must remain stable for the whole
 * lifetime of the job. */
struct _Ecore_Pthread_Worker
{
   /* Per-flavour data; which member is valid is selected by the
    * message_run/feedback_run bitfields at the bottom. */
   union
   {
      struct
      {
         Ecore_Thread_Cb func_blocking;   /* work run in the thread */
      } short_run;
      struct
      {
         Ecore_Thread_Cb func_heavy;          /* work run in the thread */
         Ecore_Thread_Notify_Cb func_notify;  /* delivered in main loop */

         Ecore_Pthread_Worker *direct_worker; /* set for no-queue jobs */

         /* Notification accounting: the job is only destroyed once every
          * sent notification has been received (see _ecore_thread_handler
          * and _ecore_notify_handler). */
         int send;
         int received;
      } feedback_run;
      struct
      {
         Ecore_Thread_Cb func_main;
         Ecore_Thread_Notify_Cb func_notify;

         Ecore_Pipe *send;                    /* main loop -> thread replies */
         Ecore_Pthread_Worker *direct_worker;

         /* Message accounting in both directions. */
         struct
         {
            int send;
            int received;
         } from, to;
      } message_run;
   } u;

   Ecore_Thread_Waiter *waiter;   /* non-NULL while someone waits on us */
   Ecore_Thread_Cb func_cancel;   /* run if the job was cancelled */
   Ecore_Thread_Cb func_end;      /* run on normal completion */
   PH(self);                      /* thread executing this job */
   Eina_Hash *hash;               /* per-thread local data entries */
   CD(cond);
   LK(mutex);

   const void *data;              /* user data passed to all callbacks */

   int cancel;                    /* guarded by cancel_mutex */

   SLK(cancel_mutex);

   Eina_Bool message_run : 1;     /* job uses u.message_run */
   Eina_Bool feedback_run : 1;    /* job uses u.feedback_run */
   Eina_Bool kill : 1;            /* destruction deferred until notifications drain */
   Eina_Bool reschedule : 1;      /* requeue instead of finishing */
   Eina_Bool no_queue : 1;        /* runs on a dedicated (direct) thread */
};
|
|
|
|
|
2012-02-20 07:57:18 -08:00
|
|
|
typedef struct _Ecore_Pthread_Notify Ecore_Pthread_Notify;
/* Payload shipped from a worker thread to the main loop for one
 * notification callback invocation; freed by the receiving handler. */
struct _Ecore_Pthread_Notify
{
   Ecore_Pthread_Worker *work;
   const void *user_data;
};
|
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
/* Synchronous message callback: its return value replaces the message's
 * payload before the message is piped back (see
 * _ecore_message_notify_handler). */
typedef void *(*Ecore_Thread_Sync_Cb)(void *data, Ecore_Thread *thread);

typedef struct _Ecore_Pthread_Message Ecore_Pthread_Message;
/* One message exchanged with a message_run job. */
struct _Ecore_Pthread_Message
{
   union
   {
      Ecore_Thread_Cb async;       /* valid when callback && !sync */
      Ecore_Thread_Sync_Cb sync;   /* valid when callback && sync */
   } u;

   const void *data;               /* payload (rewritten by sync callbacks) */

   int code;

   Eina_Bool callback : 1;         /* message carries a callback, not plain data */
   Eina_Bool sync : 1;             /* reply must be piped back to the thread */
};
|
|
|
|
|
2009-07-31 10:06:11 -07:00
|
|
|
/* ---- module-wide state ------------------------------------------------- */

/* Maximum number of pooled worker threads (0 until configured). */
static int _ecore_thread_count_max = 0;

static void _ecore_thread_handler(void *data);

/* Number of pooled worker threads currently alive. */
static int _ecore_thread_count = 0;
/* Number of "no queue" (direct) worker threads currently alive. */
static int _ecore_thread_count_no_queue = 0;

/* Job lists: currently executing, pending short jobs, pending feedback
 * jobs — guarded by the two spinlocks right below. */
static Eina_List *_ecore_running_job = NULL;
static Eina_List *_ecore_pending_job_threads = NULL;
static Eina_List *_ecore_pending_job_threads_feedback = NULL;
static SLK(_ecore_pending_job_threads_mutex);
static SLK(_ecore_running_job_mutex);

/* Cross-thread global data store and its synchronization primitives. */
static Eina_Hash *_ecore_thread_global_hash = NULL;
static LRWK(_ecore_thread_global_hash_lock);
static LK(_ecore_thread_global_hash_mutex);
static CD(_ecore_thread_global_hash_cond);

static Eina_Bool have_main_loop_thread = 0;

/* Recycled Ecore_Pthread_Worker structures, to limit malloc/free churn. */
static Eina_Trash *_ecore_thread_worker_trash = NULL;
static int _ecore_thread_worker_count = 0;

static void *_ecore_thread_worker(void *, Eina_Thread);
static Ecore_Pthread_Worker *_ecore_thread_worker_new(void);
|
|
|
|
|
2011-10-20 22:40:39 -07:00
|
|
|
static PH(get_main_loop_thread) (void)
|
2011-07-07 03:11:13 -07:00
|
|
|
{
|
2011-10-20 22:40:39 -07:00
|
|
|
static PH(main_loop_thread);
|
|
|
|
static pid_t main_loop_pid;
|
|
|
|
pid_t pid = getpid();
|
|
|
|
|
|
|
|
if (pid != main_loop_pid)
|
|
|
|
{
|
|
|
|
main_loop_pid = pid;
|
|
|
|
main_loop_thread = PHS();
|
|
|
|
have_main_loop_thread = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return main_loop_thread;
|
2011-07-07 03:11:13 -07:00
|
|
|
}
|
|
|
|
|
2010-11-23 10:32:17 -08:00
|
|
|
static void
|
|
|
|
_ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
|
|
|
|
{
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKD(worker->cancel_mutex);
|
2012-02-22 13:38:39 -08:00
|
|
|
CDD(worker->cond);
|
|
|
|
LKD(worker->mutex);
|
|
|
|
|
2012-03-29 01:52:25 -07:00
|
|
|
if (_ecore_thread_worker_count > ((_ecore_thread_count_max + 1) * 16))
|
2010-11-23 10:32:17 -08:00
|
|
|
{
|
2012-05-30 03:25:44 -07:00
|
|
|
_ecore_thread_worker_count--;
|
2010-11-23 10:32:17 -08:00
|
|
|
free(worker);
|
2011-10-20 22:40:39 -07:00
|
|
|
return;
|
2010-11-23 10:32:17 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
eina_trash_push(&_ecore_thread_worker_trash, worker);
|
|
|
|
}
|
|
|
|
|
2010-07-30 01:52:18 -07:00
|
|
|
static void
|
|
|
|
_ecore_thread_data_free(void *data)
|
|
|
|
{
|
|
|
|
Ecore_Thread_Data *d = data;
|
|
|
|
|
|
|
|
if (d->cb) d->cb(d->data);
|
|
|
|
free(d);
|
|
|
|
}
|
|
|
|
|
2018-01-25 12:32:14 -08:00
|
|
|
/* Block until the thread whose handle was smuggled through @data (stored
 * as a uintptr_t in the pointer value) has terminated. */
void
_ecore_thread_join(void *data)
{
   PH(thread) = (uintptr_t)data;
   DBG("joining thread=%" PRIu64, (uint64_t)thread);
   PHJ(thread);
}
|
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
static void
|
2010-10-06 04:48:45 -07:00
|
|
|
_ecore_thread_kill(Ecore_Pthread_Worker *work)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
2010-06-30 06:25:28 -07:00
|
|
|
if (work->cancel)
|
|
|
|
{
|
2010-07-20 21:26:57 -07:00
|
|
|
if (work->func_cancel)
|
2011-10-20 22:40:39 -07:00
|
|
|
work->func_cancel((void *)work->data, (Ecore_Thread *)work);
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-07-20 21:26:57 -07:00
|
|
|
if (work->func_end)
|
2011-10-20 22:40:39 -07:00
|
|
|
work->func_end((void *)work->data, (Ecore_Thread *)work);
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
|
|
|
|
2010-09-22 02:47:55 -07:00
|
|
|
if (work->feedback_run)
|
2010-11-23 10:32:17 -08:00
|
|
|
{
|
|
|
|
if (work->u.feedback_run.direct_worker)
|
|
|
|
_ecore_thread_worker_free(work->u.feedback_run.direct_worker);
|
|
|
|
}
|
2010-07-23 10:52:50 -07:00
|
|
|
if (work->hash)
|
|
|
|
eina_hash_free(work->hash);
|
2012-01-11 05:45:34 -08:00
|
|
|
_ecore_thread_worker_free(work);
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
|
|
|
|
2010-10-06 04:48:45 -07:00
|
|
|
static void
|
2012-02-20 07:57:18 -08:00
|
|
|
_ecore_thread_handler(void *data)
|
2010-10-06 04:48:45 -07:00
|
|
|
{
|
2012-02-20 07:57:18 -08:00
|
|
|
Ecore_Pthread_Worker *work = data;
|
2010-10-06 04:48:45 -07:00
|
|
|
|
|
|
|
if (work->feedback_run)
|
|
|
|
{
|
|
|
|
if (work->u.feedback_run.send != work->u.feedback_run.received)
|
|
|
|
{
|
|
|
|
work->kill = EINA_TRUE;
|
2011-10-20 22:40:39 -07:00
|
|
|
return;
|
2010-10-06 04:48:45 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
_ecore_thread_kill(work);
|
|
|
|
}
|
|
|
|
|
2012-02-25 00:30:51 -08:00
|
|
|
#if 0
|
2010-06-30 06:25:28 -07:00
|
|
|
static void
|
2012-11-25 01:55:32 -08:00
|
|
|
_ecore_nothing_handler(void *data EINA_UNUSED, void *buffer EINA_UNUSED, unsigned int nbyte EINA_UNUSED)
|
2010-06-30 06:25:28 -07:00
|
|
|
{
|
2012-02-20 07:57:18 -08:00
|
|
|
}
|
2018-06-20 14:12:51 -07:00
|
|
|
|
2012-02-25 00:30:51 -08:00
|
|
|
#endif
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2012-02-20 07:57:18 -08:00
|
|
|
static void
|
|
|
|
_ecore_notify_handler(void *data)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Notify *notify = data;
|
|
|
|
Ecore_Pthread_Worker *work = notify->work;
|
2018-06-20 14:12:51 -07:00
|
|
|
void *user_data = (void *)notify->user_data;
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2010-10-06 04:48:45 -07:00
|
|
|
work->u.feedback_run.received++;
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2010-09-22 02:47:55 -07:00
|
|
|
if (work->u.feedback_run.func_notify)
|
2011-10-20 22:40:39 -07:00
|
|
|
work->u.feedback_run.func_notify((void *)work->data, (Ecore_Thread *)work, user_data);
|
2010-10-06 04:48:45 -07:00
|
|
|
|
|
|
|
/* Force reading all notify event before killing the thread */
|
|
|
|
if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
|
|
|
|
{
|
|
|
|
_ecore_thread_kill(work);
|
|
|
|
}
|
2012-03-14 10:51:38 -07:00
|
|
|
|
|
|
|
free(notify);
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Main-loop handler for one message_run notification.  Plain messages go
 * to the user's notify callback.  Callback messages execute here; the
 * synchronous ones rewrite the message with the callback's result and
 * pipe it back to the worker thread, which then owns (and frees) it. */
static void
_ecore_message_notify_handler(void *data)
{
   Ecore_Pthread_Notify *notify = data;
   Ecore_Pthread_Worker *work = notify->work;
   Ecore_Pthread_Message *user_data = (void *)notify->user_data;
   Eina_Bool delete = EINA_TRUE;

   work->u.message_run.from.received++;

   if (!user_data->callback)
     {
        /* Ordinary payload: deliver to the user's notify callback. */
        if (work->u.message_run.func_notify)
          work->u.message_run.func_notify((void *)work->data, (Ecore_Thread *)work, (void *)user_data->data);
     }
   else
     {
        if (user_data->sync)
          {
             /* Sync callback: its result replaces the payload, the reply
              * is piped back to the worker, and ownership transfers with
              * it — so skip the free below. */
             user_data->data = user_data->u.sync((void *)user_data->data, (Ecore_Thread *)work);
             user_data->callback = EINA_FALSE;
             user_data->code = INT_MAX;
             ecore_pipe_write(work->u.message_run.send, &user_data, sizeof (Ecore_Pthread_Message *));

             delete = EINA_FALSE;
          }
        else
          {
             user_data->u.async((void *)user_data->data, (Ecore_Thread *)work);
          }
     }

   if (delete)
     {
        free(user_data);
     }

   /* Force reading all notify event before killing the thread */
   if (work->kill && work->u.message_run.from.send == work->u.message_run.from.received)
     {
        _ecore_thread_kill(work);
     }
   free(notify);
}
|
|
|
|
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
static void
|
|
|
|
_ecore_short_job_cleanup(void *data)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *work = data;
|
|
|
|
|
|
|
|
DBG("cleanup work=%p, thread=%" PRIu64, work, (uint64_t)work->self);
|
|
|
|
|
|
|
|
SLKL(_ecore_running_job_mutex);
|
|
|
|
_ecore_running_job = eina_list_remove(_ecore_running_job, work);
|
|
|
|
SLKU(_ecore_running_job_mutex);
|
|
|
|
|
|
|
|
if (work->reschedule)
|
|
|
|
{
|
|
|
|
work->reschedule = EINA_FALSE;
|
|
|
|
|
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
|
|
|
_ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
|
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-02-20 07:57:18 -08:00
|
|
|
/* Pop one pending short job and execute it on @thread.  The cleanup
 * handler pushed before the user callback runs performs the
 * requeue/finish bookkeeping even if the thread is cancelled mid-job. */
static void
_ecore_short_job(PH(thread))
{
   Ecore_Pthread_Worker *work;
   int cancel;

   SLKL(_ecore_pending_job_threads_mutex);

   if (!_ecore_pending_job_threads)
     {
        SLKU(_ecore_pending_job_threads_mutex);
        return;
     }

   /* Dequeue the oldest pending job. */
   work = eina_list_data_get(_ecore_pending_job_threads);
   _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
                                                      _ecore_pending_job_threads);
   SLKU(_ecore_pending_job_threads_mutex);

   SLKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_append(_ecore_running_job, work);
   SLKU(_ecore_running_job_mutex);

   /* Snapshot the cancellation flag under its own lock. */
   SLKL(work->cancel_mutex);
   cancel = work->cancel;
   SLKU(work->cancel_mutex);
   work->self = thread;

   EINA_THREAD_CLEANUP_PUSH(_ecore_short_job_cleanup, work);
   if (!cancel)
     work->u.short_run.func_blocking((void *)work->data, (Ecore_Thread *)work);
   /* The user callback may have enabled cancellability; disable it again
    * before internal bookkeeping runs. */
   eina_thread_cancellable_set(EINA_FALSE, NULL);
   EINA_THREAD_CLEANUP_POP(EINA_TRUE);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_ecore_feedback_job_cleanup(void *data)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *work = data;
|
|
|
|
|
|
|
|
DBG("cleanup work=%p, thread=%" PRIu64, work, (uint64_t)work->self);
|
2012-05-29 20:10:30 -07:00
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_running_job_mutex);
|
2012-05-29 20:10:30 -07:00
|
|
|
_ecore_running_job = eina_list_remove(_ecore_running_job, work);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_running_job_mutex);
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
|
2012-05-24 02:51:17 -07:00
|
|
|
if (work->reschedule)
|
|
|
|
{
|
|
|
|
work->reschedule = EINA_FALSE;
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
_ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2012-05-24 02:51:17 -07:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
/* Pop one pending feedback job and execute its heavy callback on
 * @thread.  The cleanup handler pushed before the user callback runs
 * performs the requeue/finish bookkeeping even across cancellation. */
static void
_ecore_feedback_job(PH(thread))
{
   Ecore_Pthread_Worker *work;
   int cancel;

   SLKL(_ecore_pending_job_threads_mutex);

   if (!_ecore_pending_job_threads_feedback)
     {
        SLKU(_ecore_pending_job_threads_mutex);
        return;
     }

   /* Dequeue the oldest pending feedback job. */
   work = eina_list_data_get(_ecore_pending_job_threads_feedback);
   _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                               _ecore_pending_job_threads_feedback);
   SLKU(_ecore_pending_job_threads_mutex);

   SLKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_append(_ecore_running_job, work);
   SLKU(_ecore_running_job_mutex);

   /* Snapshot the cancellation flag under its own lock. */
   SLKL(work->cancel_mutex);
   cancel = work->cancel;
   SLKU(work->cancel_mutex);
   work->self = thread;

   EINA_THREAD_CLEANUP_PUSH(_ecore_feedback_job_cleanup, work);
   if (!cancel)
     work->u.feedback_run.func_heavy((void *)work->data, (Ecore_Thread *)work);
   /* Re-disable cancellability in case the user callback enabled it. */
   eina_thread_cancellable_set(EINA_FALSE, NULL);
   EINA_THREAD_CLEANUP_POP(EINA_TRUE);
}
|
2012-05-29 20:10:30 -07:00
|
|
|
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
static void
|
|
|
|
_ecore_direct_worker_cleanup(void *data)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *work = data;
|
2012-05-29 20:10:30 -07:00
|
|
|
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
DBG("cleanup work=%p, thread=%" PRIu64 " (should join)", work, (uint64_t)work->self);
|
|
|
|
|
2018-06-25 11:19:59 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
|
|
|
_ecore_thread_count_no_queue--;
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
|
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
ecore_main_loop_thread_safe_call_async((Ecore_Cb)_ecore_thread_join,
|
|
|
|
(void *)(intptr_t)PHS());
|
2018-06-25 11:19:59 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2010-06-30 06:25:28 -07:00
|
|
|
}
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
static void *
|
2019-09-22 23:17:45 -07:00
|
|
|
_ecore_direct_worker(void *data, Eina_Thread t EINA_UNUSED)
|
2010-06-30 06:25:28 -07:00
|
|
|
{
|
2019-09-22 23:17:45 -07:00
|
|
|
Ecore_Pthread_Worker *work = data;
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
eina_thread_cancellable_set(EINA_FALSE, NULL);
|
2015-09-09 23:17:08 -07:00
|
|
|
eina_thread_name_set(eina_thread_self(), "Ethread-feedback");
|
2012-05-22 03:13:14 -07:00
|
|
|
work->self = PHS();
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
|
|
|
|
EINA_THREAD_CLEANUP_PUSH(_ecore_direct_worker_cleanup, work);
|
2012-02-20 07:57:18 -08:00
|
|
|
if (work->message_run)
|
2018-06-20 14:12:51 -07:00
|
|
|
work->u.message_run.func_main((void *)work->data, (Ecore_Thread *)work);
|
2012-02-20 07:57:18 -08:00
|
|
|
else
|
2018-06-20 14:12:51 -07:00
|
|
|
work->u.feedback_run.func_heavy((void *)work->data, (Ecore_Thread *)work);
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
eina_thread_cancellable_set(EINA_FALSE, NULL);
|
|
|
|
EINA_THREAD_CLEANUP_POP(EINA_TRUE);
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2012-02-20 07:57:18 -08:00
|
|
|
return NULL;
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
|
|
|
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
static void
|
|
|
|
_ecore_thread_worker_cleanup(void *data EINA_UNUSED)
|
|
|
|
{
|
2016-10-09 09:51:53 -07:00
|
|
|
DBG("cleanup thread=%" PRIuPTR " (should join)", PHS());
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
|
|
|
_ecore_thread_count--;
|
2018-06-20 14:12:51 -07:00
|
|
|
ecore_main_loop_thread_safe_call_async((Ecore_Cb)_ecore_thread_join,
|
|
|
|
(void *)(intptr_t)PHS());
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
|
|
|
}
|
|
|
|
|
2010-06-30 06:25:28 -07:00
|
|
|
static void *
|
2019-09-22 23:17:45 -07:00
|
|
|
_ecore_thread_worker(void *data EINA_UNUSED, Eina_Thread t EINA_UNUSED)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
eina_thread_cancellable_set(EINA_FALSE, NULL);
|
|
|
|
EINA_THREAD_CLEANUP_PUSH(_ecore_thread_worker_cleanup, NULL);
|
2011-10-20 22:40:39 -07:00
|
|
|
restart:
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
|
|
|
|
/* these 2 are cancellation points as user cb may enable */
|
2012-05-24 02:51:17 -07:00
|
|
|
_ecore_short_job(PHS());
|
|
|
|
_ecore_feedback_job(PHS());
|
2010-06-30 06:25:28 -07:00
|
|
|
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
/* from here on, cancellations are guaranteed to be disabled */
|
|
|
|
|
2010-09-22 02:47:55 -07:00
|
|
|
/* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
|
2015-09-09 23:17:08 -07:00
|
|
|
eina_thread_name_set(eina_thread_self(), "Ethread-worker");
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2010-11-23 08:52:18 -08:00
|
|
|
if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
|
2009-11-06 14:15:04 -08:00
|
|
|
{
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2010-10-14 09:45:48 -07:00
|
|
|
goto restart;
|
2009-11-06 14:15:04 -08:00
|
|
|
}
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-10-14 09:45:48 -07:00
|
|
|
/* Sleep a little to prevent premature death */
|
2011-05-17 00:17:53 -07:00
|
|
|
#ifdef _WIN32
|
|
|
|
Sleep(1); /* around 50ms */
|
|
|
|
#else
|
2012-05-24 02:51:17 -07:00
|
|
|
usleep(50);
|
2011-05-17 00:17:53 -07:00
|
|
|
#endif
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2010-11-23 08:52:18 -08:00
|
|
|
if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
|
2010-10-14 09:45:48 -07:00
|
|
|
{
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2010-10-14 09:45:48 -07:00
|
|
|
goto restart;
|
|
|
|
}
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2010-06-30 06:25:28 -07:00
|
|
|
|
eina/ecore: allow threads to be canceled, use in ecore_con.
As discussed in the mailing list, many people will use worker threads
to execute blocking syscalls and mandating ecore_thread_check() for
voluntary preemption reduces the ecore_thread usefulness a lot.
A clear example is ecore_con usage of connect() and getaddrinfo() in
threads. If the connect timeout expires, the thread will be cancelled,
but it was blocked on syscalls and they will hang around for long
time. If the application exits, ecore will print an error saying it
can SEGV.
Then enable access to pthread_setcancelstate(PTHREAD_CANCEL_ENABLE)
via eina_thread_cancellable_set(EINA_TRUE), to pthread_cancel() via
eina_thread_cancel(), to pthread_cleanup_push()/pthread_cleanup_pop()
via EINA_THREAD_CLEANUP_PUSH()/EINA_THREAD_CLEANUP_POP() and so on.
Ecore threads will enforce non-cancellable threads on its own code,
but the user may decide to enable that and allow cancellation, that's
not an issue since ecore_thread now plays well and use cleanup
functions.
Ecore con connect/resolve make use of that and enable cancellable
state, efl_net_dialer_tcp benefits a lot from that.
A good comparison of the benefit is to run:
./src/examples/ecore/efl_io_copier_example tcp://google.com:1234 :stdout:
before and after. It will timeout after 30s and with this patch the
thread is gone, no ecore error is printed about possible SEGV.
2016-09-13 21:38:58 -07:00
|
|
|
EINA_THREAD_CLEANUP_POP(EINA_TRUE);
|
|
|
|
|
2012-02-20 07:57:18 -08:00
|
|
|
return NULL;
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2010-11-26 05:50:31 -08:00
|
|
|
static Ecore_Pthread_Worker *
|
|
|
|
_ecore_thread_worker_new(void)
|
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *result;
|
|
|
|
|
|
|
|
result = eina_trash_pop(&_ecore_thread_worker_trash);
|
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
if (!result)
|
2012-05-30 03:25:44 -07:00
|
|
|
{
|
2018-06-20 14:12:51 -07:00
|
|
|
result = calloc(1, sizeof(Ecore_Pthread_Worker));
|
|
|
|
_ecore_thread_worker_count++;
|
2012-05-30 03:25:44 -07:00
|
|
|
}
|
2016-12-27 08:49:35 -08:00
|
|
|
else
|
|
|
|
{
|
2018-06-20 14:12:51 -07:00
|
|
|
memset(result, 0, sizeof(Ecore_Pthread_Worker));
|
2016-12-27 08:49:35 -08:00
|
|
|
}
|
2010-11-26 05:50:31 -08:00
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKI(result->cancel_mutex);
|
2012-02-22 13:38:39 -08:00
|
|
|
LKI(result->mutex);
|
|
|
|
CDI(result->cond, result->mutex);
|
|
|
|
|
2010-11-26 05:50:31 -08:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2009-10-09 20:24:56 -07:00
|
|
|
void
|
2009-09-03 22:49:54 -07:00
|
|
|
_ecore_thread_init(void)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
2017-02-04 20:23:05 -08:00
|
|
|
_ecore_thread_count_max = eina_cpu_count() * 4;
|
2009-08-03 01:19:33 -07:00
|
|
|
if (_ecore_thread_count_max <= 0)
|
2009-07-31 10:06:11 -07:00
|
|
|
_ecore_thread_count_max = 1;
|
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKI(_ecore_pending_job_threads_mutex);
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKI(_ecore_thread_global_hash_lock);
|
|
|
|
LKI(_ecore_thread_global_hash_mutex);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKI(_ecore_running_job_mutex);
|
2012-02-20 07:57:18 -08:00
|
|
|
CDI(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex);
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
|
|
|
|
2009-10-09 20:24:56 -07:00
|
|
|
void
|
2009-09-03 22:49:54 -07:00
|
|
|
_ecore_thread_shutdown(void)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
2009-10-09 20:24:56 -07:00
|
|
|
/* FIXME: If function are still running in the background, should we kill them ? */
|
2018-06-20 14:12:51 -07:00
|
|
|
Ecore_Pthread_Worker *work;
|
|
|
|
Eina_List *l;
|
|
|
|
Eina_Bool test;
|
|
|
|
int iteration = 0;
|
2011-10-20 22:40:39 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2011-10-20 22:40:39 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
EINA_LIST_FREE(_ecore_pending_job_threads, work)
|
|
|
|
{
|
|
|
|
if (work->func_cancel)
|
|
|
|
work->func_cancel((void *)work->data, (Ecore_Thread *)work);
|
|
|
|
free(work);
|
|
|
|
}
|
2011-10-20 22:40:39 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
|
|
|
|
{
|
|
|
|
if (work->func_cancel)
|
|
|
|
work->func_cancel((void *)work->data, (Ecore_Thread *)work);
|
|
|
|
free(work);
|
|
|
|
}
|
2011-10-20 22:40:39 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
|
|
|
SLKL(_ecore_running_job_mutex);
|
2012-08-06 20:47:14 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
EINA_LIST_FOREACH(_ecore_running_job, l, work)
|
|
|
|
ecore_thread_cancel((Ecore_Thread *)work);
|
2012-05-29 20:10:30 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
SLKU(_ecore_running_job_mutex);
|
2011-10-20 22:40:39 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
do
|
|
|
|
{
|
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2018-06-25 11:19:59 -07:00
|
|
|
if (_ecore_thread_count + _ecore_thread_count_no_queue > 0)
|
2018-06-20 14:12:51 -07:00
|
|
|
{
|
|
|
|
test = EINA_TRUE;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
test = EINA_FALSE;
|
|
|
|
}
|
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
|
|
|
iteration++;
|
2018-06-26 13:35:55 -07:00
|
|
|
if (test)
|
|
|
|
{
|
|
|
|
_ecore_main_call_flush();
|
|
|
|
usleep(1000);
|
|
|
|
}
|
2018-06-26 13:34:24 -07:00
|
|
|
} while (test == EINA_TRUE && iteration < 50);
|
2018-06-20 14:12:51 -07:00
|
|
|
|
|
|
|
if (iteration == 20 && _ecore_thread_count > 0)
|
|
|
|
{
|
|
|
|
ERR("%i of the child thread are still running after 1s. This can lead to a segv. Sorry.", _ecore_thread_count);
|
|
|
|
}
|
2012-05-29 20:10:30 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
if (_ecore_thread_global_hash)
|
|
|
|
eina_hash_free(_ecore_thread_global_hash);
|
|
|
|
have_main_loop_thread = 0;
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
while ((work = eina_trash_pop(&_ecore_thread_worker_trash)))
|
|
|
|
{
|
|
|
|
free(work);
|
|
|
|
}
|
|
|
|
|
|
|
|
SLKD(_ecore_pending_job_threads_mutex);
|
|
|
|
LRWKD(_ecore_thread_global_hash_lock);
|
|
|
|
LKD(_ecore_thread_global_hash_mutex);
|
|
|
|
SLKD(_ecore_running_job_mutex);
|
|
|
|
CDD(_ecore_thread_global_hash_cond);
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
2010-10-17 00:03:28 -07:00
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI Ecore_Thread *
|
2010-11-12 05:28:19 -08:00
|
|
|
ecore_thread_run(Ecore_Thread_Cb func_blocking,
|
|
|
|
Ecore_Thread_Cb func_end,
|
|
|
|
Ecore_Thread_Cb func_cancel,
|
2018-06-20 14:12:51 -07:00
|
|
|
const void *data)
|
2009-07-31 10:06:11 -07:00
|
|
|
{
|
|
|
|
Ecore_Pthread_Worker *work;
|
2012-05-30 05:14:34 -07:00
|
|
|
Eina_Bool tried = EINA_FALSE;
|
2012-05-22 03:13:14 -07:00
|
|
|
PH(thread);
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2012-03-29 01:52:25 -07:00
|
|
|
EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
|
2012-12-31 08:14:40 -08:00
|
|
|
|
2010-07-02 09:01:21 -07:00
|
|
|
if (!func_blocking) return NULL;
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-11-23 10:32:17 -08:00
|
|
|
work = _ecore_thread_worker_new();
|
2009-12-21 04:25:32 -08:00
|
|
|
if (!work)
|
|
|
|
{
|
2010-10-25 02:39:00 -07:00
|
|
|
if (func_cancel)
|
2011-10-20 22:40:39 -07:00
|
|
|
func_cancel((void *)data, NULL);
|
2010-07-20 21:26:57 -07:00
|
|
|
return NULL;
|
2009-12-21 04:25:32 -08:00
|
|
|
}
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2010-07-02 09:01:21 -07:00
|
|
|
work->u.short_run.func_blocking = func_blocking;
|
2009-07-31 10:06:11 -07:00
|
|
|
work->func_end = func_end;
|
2009-11-06 14:15:04 -08:00
|
|
|
work->func_cancel = func_cancel;
|
|
|
|
work->cancel = EINA_FALSE;
|
2010-09-22 02:47:55 -07:00
|
|
|
work->feedback_run = EINA_FALSE;
|
2012-02-20 07:57:18 -08:00
|
|
|
work->message_run = EINA_FALSE;
|
2010-10-06 04:48:45 -07:00
|
|
|
work->kill = EINA_FALSE;
|
2011-03-16 06:16:14 -07:00
|
|
|
work->reschedule = EINA_FALSE;
|
2012-02-20 07:57:18 -08:00
|
|
|
work->no_queue = EINA_FALSE;
|
2009-07-31 10:06:11 -07:00
|
|
|
work->data = data;
|
|
|
|
|
2012-01-12 01:06:28 -08:00
|
|
|
work->self = 0;
|
2010-10-22 10:39:25 -07:00
|
|
|
work->hash = NULL;
|
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2010-07-20 02:40:53 -07:00
|
|
|
_ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2018-06-25 13:53:40 -07:00
|
|
|
if (_ecore_thread_count == _ecore_thread_count_max)
|
2009-08-03 07:09:09 -07:00
|
|
|
{
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2011-10-20 22:40:39 -07:00
|
|
|
return (Ecore_Thread *)work;
|
2009-08-03 07:09:09 -07:00
|
|
|
}
|
2009-07-31 10:06:11 -07:00
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2009-07-31 10:06:11 -07:00
|
|
|
|
|
|
|
/* One more thread could be created. */
|
2010-10-13 09:44:15 -07:00
|
|
|
eina_threads_init();
|
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2012-05-22 03:13:14 -07:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
retry:
|
2012-10-19 03:45:25 -07:00
|
|
|
if (PHC(thread, _ecore_thread_worker, NULL))
|
2011-06-28 08:53:19 -07:00
|
|
|
{
|
|
|
|
_ecore_thread_count++;
|
2018-06-20 14:12:51 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2011-10-20 22:40:39 -07:00
|
|
|
return (Ecore_Thread *)work;
|
2011-06-28 08:53:19 -07:00
|
|
|
}
|
2012-05-30 05:14:34 -07:00
|
|
|
if (!tried)
|
|
|
|
{
|
2018-06-20 14:12:51 -07:00
|
|
|
_ecore_main_call_flush();
|
|
|
|
tried = EINA_TRUE;
|
|
|
|
goto retry;
|
2012-05-30 05:14:34 -07:00
|
|
|
}
|
2010-10-13 09:44:15 -07:00
|
|
|
|
2009-11-06 14:15:04 -08:00
|
|
|
if (_ecore_thread_count == 0)
|
|
|
|
{
|
2010-10-14 09:45:48 -07:00
|
|
|
_ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
|
|
|
|
|
2010-07-20 21:26:57 -07:00
|
|
|
if (work->func_cancel)
|
2018-06-20 14:12:51 -07:00
|
|
|
work->func_cancel((void *)work->data, (Ecore_Thread *)work);
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
_ecore_thread_worker_free(work);
|
2010-07-20 21:26:57 -07:00
|
|
|
work = NULL;
|
2009-11-06 14:15:04 -08:00
|
|
|
}
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2012-05-30 05:14:34 -07:00
|
|
|
|
|
|
|
eina_threads_shutdown();
|
|
|
|
|
2011-10-20 22:40:39 -07:00
|
|
|
return (Ecore_Thread *)work;
|
2009-07-31 10:06:11 -07:00
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
/* Request cancellation of @p thread.
 *
 * Returns EINA_TRUE when the thread is already gone (NULL handle, a
 * feedback thread that was killed, or a still-queued job that could be
 * removed and cancelled synchronously).  Returns EINA_FALSE when the
 * thread is running and only the cancel flag could be raised (the worker
 * must poll it via ecore_thread_check(), or have opted into real
 * pthread-style cancellation). */
EAPI Eina_Bool
ecore_thread_cancel(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *volatile work = (Ecore_Pthread_Worker *)thread;
   Eina_List *l;
   int cancel;

   if (!work)
     return EINA_TRUE;
   /* Snapshot the cancel flag under its spinlock. */
   SLKL(work->cancel_mutex);
   cancel = work->cancel;
   SLKU(work->cancel_mutex);
   if (cancel)
     return EINA_FALSE;

   if (work->feedback_run)
     {
        if (work->kill)
          return EINA_TRUE;
        /* Feedback still in flight: don't tear down, just raise the flag. */
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          goto on_exit;
     }

   SLKL(_ecore_pending_job_threads_mutex);

   /* Only the main loop thread may pull a still-queued job out of the
      pending lists and cancel it synchronously. */
   if ((have_main_loop_thread) &&
       (PHE(get_main_loop_thread(), PHS())))
     {
        /* NOTE: the FOREACH below reuses `work` as the iteration cursor;
           it is re-derived from `thread` after the loops. */
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
            {
               if ((void *)work == (void *)thread)
                 {
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    /* Unlock before invoking the user callback. */
                    SLKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *)work->data, (Ecore_Thread *)work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
        else
          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
            {
               if ((void *)work == (void *)thread)
                 {
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    SLKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *)work->data, (Ecore_Thread *)work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
     }

   SLKU(_ecore_pending_job_threads_mutex);

   /* Restore `work` after the FOREACH loops clobbered it. */
   work = (Ecore_Pthread_Worker *)thread;

   /* Delay the destruction */
on_exit:
   eina_thread_cancel(work->self); /* noop unless eina_thread_cancellable_set() was used by user */
   SLKL(work->cancel_mutex);
   work->cancel = EINA_TRUE;
   SLKU(work->cancel_mutex);

   return EINA_FALSE;
}
|
2010-06-30 06:25:28 -07:00
|
|
|
|
2015-01-07 07:42:24 -08:00
|
|
|
static void
|
|
|
|
_ecore_thread_wait_reset(Ecore_Thread_Waiter *waiter,
|
|
|
|
Ecore_Pthread_Worker *worker)
|
|
|
|
{
|
|
|
|
worker->func_cancel = waiter->func_cancel;
|
|
|
|
worker->func_end = waiter->func_end;
|
2017-03-02 09:32:40 -08:00
|
|
|
worker->waiter = NULL;
|
2017-02-04 10:12:03 -08:00
|
|
|
|
2015-01-07 07:42:24 -08:00
|
|
|
waiter->func_end = NULL;
|
|
|
|
waiter->func_cancel = NULL;
|
2017-02-04 10:12:03 -08:00
|
|
|
waiter->waiting = EINA_FALSE;
|
2015-01-07 07:42:24 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-03-02 09:32:40 -08:00
|
|
|
_ecore_thread_wait_cancel(void *data EINA_UNUSED, Ecore_Thread *thread)
|
2015-01-07 07:42:24 -08:00
|
|
|
{
|
2018-06-20 14:12:51 -07:00
|
|
|
Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
|
2017-03-02 09:32:40 -08:00
|
|
|
Ecore_Thread_Waiter *waiter = worker->waiter;
|
2015-01-07 07:42:24 -08:00
|
|
|
|
2017-03-02 09:32:40 -08:00
|
|
|
if (waiter->func_cancel) waiter->func_cancel(data, thread);
|
2015-01-07 07:42:24 -08:00
|
|
|
_ecore_thread_wait_reset(waiter, worker);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-03-02 09:32:40 -08:00
|
|
|
_ecore_thread_wait_end(void *data EINA_UNUSED, Ecore_Thread *thread)
|
2015-01-07 07:42:24 -08:00
|
|
|
{
|
2018-06-20 14:12:51 -07:00
|
|
|
Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
|
2017-03-02 09:32:40 -08:00
|
|
|
Ecore_Thread_Waiter *waiter = worker->waiter;
|
2015-01-07 07:42:24 -08:00
|
|
|
|
2017-03-02 09:32:40 -08:00
|
|
|
if (waiter->func_end) waiter->func_end(data, thread);
|
2015-01-07 07:42:24 -08:00
|
|
|
_ecore_thread_wait_reset(waiter, worker);
|
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
/* Block (while still servicing main-loop safe calls) until @p thread
 * finishes or @p wait seconds elapse.
 *
 * Works by swapping the worker's end/cancel callbacks for local wrappers
 * that flip waiter.waiting when the thread completes.  Returns EINA_TRUE
 * if the thread ended within the timeout, EINA_FALSE on timeout (the
 * original callbacks are then restored). */
EAPI Eina_Bool
ecore_thread_wait(Ecore_Thread *thread, double wait)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
   Ecore_Thread_Waiter waiter;

   if (!thread) return EINA_TRUE;

   /* Stash the user's callbacks on the stack-allocated waiter. */
   waiter.func_end = worker->func_end;
   waiter.func_cancel = worker->func_cancel;
   waiter.waiting = EINA_TRUE;

   // Now trick the thread to call the wrapper function
   worker->waiter = &waiter;
   worker->func_cancel = _ecore_thread_wait_cancel;
   worker->func_end = _ecore_thread_wait_end;

   /* Poll loop: flush pending main-loop safe calls (which is how the
      wrapper callbacks get delivered) and decrement the remaining budget
      by the real elapsed time of each iteration. */
   while (waiter.waiting == EINA_TRUE)
     {
        double start, end;

        start = ecore_time_get();
        _ecore_main_call_flush();
        ecore_main_loop_thread_safe_call_wait(0.0001);
        end = ecore_time_get();

        wait -= end - start;

        if (wait <= 0) break;
     }

   if (waiter.waiting == EINA_FALSE)
     {
        /* Thread completed; wrappers already reset everything. */
        return EINA_TRUE;
     }
   else
     {
        /* Timed out: put the user's callbacks back ourselves. */
        _ecore_thread_wait_reset(&waiter, worker);
        return EINA_FALSE;
     }
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
/* Voluntary cancellation point for worker code: returns true-ish when the
 * thread has been asked to cancel (or @p thread is NULL) so the heavy
 * function can bail out early.  The flag is read under its spinlock. */
EAPI Eina_Bool
ecore_thread_check(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *volatile worker = (Ecore_Pthread_Worker *)thread;
   int cancel;

   if (!worker) return EINA_TRUE;
   SLKL(worker->cancel_mutex);

   cancel = worker->cancel;
   /* FIXME: there is an insane bug driving me nuts here. I don't know if
      it's a race condition, some cache issue or some alien attack on our software.
      But ecore_thread_check will only work correctly with a printf, all the volatile,
      lock and even usleep don't help here... */
   /* fprintf(stderr, "wc: %i\n", cancel); */
   SLKU(worker->cancel_mutex);
   return cancel;
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
/* Launch a feedback-capable worker thread.
 *
 * @p func_heavy runs in the worker, @p func_notify receives feedback in
 * the main loop, @p func_end / @p func_cancel run in the main loop on
 * completion/cancellation.  When @p try_no_queue is set, a dedicated
 * thread is attempted first, bypassing the shared worker pool.  Must be
 * called from the main loop.  Returns NULL on failure (after invoking
 * func_cancel), otherwise the new Ecore_Thread handle. */
EAPI Ecore_Thread *
ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
                          Ecore_Thread_Notify_Cb func_notify,
                          Ecore_Thread_Cb func_end,
                          Ecore_Thread_Cb func_cancel,
                          const void *data,
                          Eina_Bool try_no_queue)
{
   Ecore_Pthread_Worker *worker;
   Eina_Bool tried = EINA_FALSE;
   PH(thread);

   EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);

   if (!func_heavy) return NULL;

   worker = _ecore_thread_worker_new();
   if (!worker) goto on_error;

   /* Initialize the worker descriptor. */
   worker->u.feedback_run.func_heavy = func_heavy;
   worker->u.feedback_run.func_notify = func_notify;
   worker->hash = NULL;
   worker->func_cancel = func_cancel;
   worker->func_end = func_end;
   worker->data = data;
   worker->cancel = EINA_FALSE;
   worker->message_run = EINA_FALSE;
   worker->feedback_run = EINA_TRUE;
   worker->kill = EINA_FALSE;
   worker->reschedule = EINA_FALSE;
   worker->self = 0;

   worker->u.feedback_run.send = 0;
   worker->u.feedback_run.received = 0;

   worker->u.feedback_run.direct_worker = NULL;

   if (try_no_queue)
     {
        PH(t);

        /* Dedicated-thread path: does not go through the pool queue. */
        worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
        worker->no_queue = EINA_TRUE;

        eina_threads_init();

retry_direct:
        if (PHC2(t, _ecore_direct_worker, worker))
          {
             SLKL(_ecore_pending_job_threads_mutex);
             _ecore_thread_count_no_queue++;
             SLKU(_ecore_pending_job_threads_mutex);
             return (Ecore_Thread *)worker;
          }
        /* Thread creation failed: flush pending main-loop calls once
           (may release resources) and retry a single time. */
        if (!tried)
          {
             _ecore_main_call_flush();
             tried = EINA_TRUE;
             goto retry_direct;
          }

        if (worker->u.feedback_run.direct_worker)
          {
             _ecore_thread_worker_free(worker->u.feedback_run.direct_worker);
             worker->u.feedback_run.direct_worker = NULL;
          }

        eina_threads_shutdown();
     }

   /* Fall back to the shared worker pool. */
   worker->no_queue = EINA_FALSE;

   SLKL(_ecore_pending_job_threads_mutex);
   _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);

   /* Pool already at capacity: an existing worker will pick the job up. */
   if (_ecore_thread_count == _ecore_thread_count_max)
     {
        SLKU(_ecore_pending_job_threads_mutex);
        return (Ecore_Thread *)worker;
     }

   SLKU(_ecore_pending_job_threads_mutex);

   /* One more thread could be created. */
   eina_threads_init();

   SLKL(_ecore_pending_job_threads_mutex);
retry:
   if (PHC(thread, _ecore_thread_worker, NULL))
     {
        _ecore_thread_count++;
        SLKU(_ecore_pending_job_threads_mutex);
        return (Ecore_Thread *)worker;
     }
   if (!tried)
     {
        _ecore_main_call_flush();
        tried = EINA_TRUE;
        goto retry;
     }
   SLKU(_ecore_pending_job_threads_mutex);

   eina_threads_shutdown();

on_error:
   SLKL(_ecore_pending_job_threads_mutex);
   /* No worker thread exists to service the queue: give up, undo the
      enqueue and report cancellation to the caller. */
   if (_ecore_thread_count == 0)
     {
        _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
                                                               worker);

        if (func_cancel) func_cancel((void *)data, NULL);

        if (worker)
          {
             CDD(worker->cond);
             LKD(worker->mutex);
             free(worker);
             worker = NULL;
          }
     }
   SLKU(_ecore_pending_job_threads_mutex);

   return (Ecore_Thread *)worker;
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
/* Send feedback @p data from a worker thread to the main loop.
 *
 * Must be called from the worker thread itself (checked via PHE against
 * worker->self).  For feedback workers the data is delivered to
 * func_notify; for message workers it is wrapped in an
 * Ecore_Pthread_Message first.  Returns EINA_FALSE on bad caller,
 * allocation failure or non-feedback/non-message worker. */
EAPI Eina_Bool
ecore_thread_feedback(Ecore_Thread *thread,
                      const void *data)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;

   if (!worker) return EINA_FALSE;

   /* Only the owning worker thread may emit feedback. */
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   if (worker->feedback_run)
     {
        Ecore_Pthread_Notify *notify;

        notify = malloc(sizeof (Ecore_Pthread_Notify));
        if (!notify) return EINA_FALSE;

        notify->user_data = data;
        notify->work = worker;
        /* send/received counters let cancel logic detect in-flight data. */
        worker->u.feedback_run.send++;

        ecore_main_loop_thread_safe_call_async(_ecore_notify_handler, notify);
     }
   else if (worker->message_run)
     {
        Ecore_Pthread_Message *msg;
        Ecore_Pthread_Notify *notify;

        msg = malloc(sizeof (Ecore_Pthread_Message));
        if (!msg) return EINA_FALSE;
        msg->data = data;
        msg->callback = EINA_FALSE;
        msg->sync = EINA_FALSE;

        notify = malloc(sizeof (Ecore_Pthread_Notify));
        if (!notify)
          {
             /* Second allocation failed: release the first. */
             free(msg);
             return EINA_FALSE;
          }
        notify->work = worker;
        notify->user_data = msg;

        worker->u.message_run.from.send++;
        ecore_main_loop_thread_safe_call_async(_ecore_message_notify_handler, notify);
     }
   else
     return EINA_FALSE;

   return EINA_TRUE;
}
|
|
|
|
|
2012-02-20 07:57:18 -08:00
|
|
|
#if 0
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI Ecore_Thread *
|
2012-02-20 07:57:18 -08:00
|
|
|
ecore_thread_message_run(Ecore_Thread_Cb func_main,
|
2018-06-20 14:12:51 -07:00
|
|
|
Ecore_Thread_Notify_Cb func_notify,
|
|
|
|
Ecore_Thread_Cb func_end,
|
|
|
|
Ecore_Thread_Cb func_cancel,
|
|
|
|
const void *data)
|
2012-02-20 07:57:18 -08:00
|
|
|
{
|
2018-06-20 14:12:51 -07:00
|
|
|
Ecore_Pthread_Worker *worker;
|
|
|
|
PH(t);
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
if (!func_main) return NULL;
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
worker = _ecore_thread_worker_new();
|
|
|
|
if (!worker) return NULL;
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
worker->u.message_run.func_main = func_main;
|
|
|
|
worker->u.message_run.func_notify = func_notify;
|
|
|
|
worker->u.message_run.direct_worker = _ecore_thread_worker_new();
|
|
|
|
worker->u.message_run.send = ecore_pipe_add(_ecore_nothing_handler, worker);
|
|
|
|
worker->u.message_run.from.send = 0;
|
|
|
|
worker->u.message_run.from.received = 0;
|
|
|
|
worker->u.message_run.to.send = 0;
|
|
|
|
worker->u.message_run.to.received = 0;
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
ecore_pipe_freeze(worker->u.message_run.send);
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
worker->func_cancel = func_cancel;
|
|
|
|
worker->func_end = func_end;
|
|
|
|
worker->hash = NULL;
|
|
|
|
worker->data = data;
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
worker->cancel = EINA_FALSE;
|
|
|
|
worker->message_run = EINA_TRUE;
|
|
|
|
worker->feedback_run = EINA_FALSE;
|
|
|
|
worker->kill = EINA_FALSE;
|
|
|
|
worker->reschedule = EINA_FALSE;
|
|
|
|
worker->no_queue = EINA_FALSE;
|
|
|
|
worker->self = 0;
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
eina_threads_init();
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
if (PHC(t, _ecore_direct_worker, worker))
|
|
|
|
return (Ecore_Thread *)worker;
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
eina_threads_shutdown();
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
if (worker->u.message_run.direct_worker) _ecore_thread_worker_free(worker->u.message_run.direct_worker);
|
|
|
|
if (worker->u.message_run.send) ecore_pipe_del(worker->u.message_run.send);
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
CDD(worker->cond);
|
|
|
|
LKD(worker->mutex);
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
func_cancel((void *)data, NULL);
|
2012-02-20 07:57:18 -08:00
|
|
|
|
2018-06-20 14:12:51 -07:00
|
|
|
return NULL;
|
2012-02-20 07:57:18 -08:00
|
|
|
}
|
2018-06-20 14:12:51 -07:00
|
|
|
|
2012-02-20 07:57:18 -08:00
|
|
|
#endif
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI Eina_Bool
|
2011-03-16 06:16:14 -07:00
|
|
|
ecore_thread_reschedule(Ecore_Thread *thread)
|
|
|
|
{
|
2011-10-20 22:40:39 -07:00
|
|
|
Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
|
2011-03-16 06:16:14 -07:00
|
|
|
|
|
|
|
if (!worker) return EINA_FALSE;
|
|
|
|
|
|
|
|
if (!PHE(worker->self, PHS())) return EINA_FALSE;
|
|
|
|
|
|
|
|
worker->reschedule = EINA_TRUE;
|
|
|
|
return EINA_TRUE;
|
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI int
|
2010-07-20 02:40:18 -07:00
|
|
|
ecore_thread_active_get(void)
|
|
|
|
{
|
2012-03-29 01:52:25 -07:00
|
|
|
EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
|
2010-07-23 06:08:38 -07:00
|
|
|
return _ecore_thread_count;
|
2010-07-20 02:40:18 -07:00
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI int
|
2010-07-20 02:40:18 -07:00
|
|
|
ecore_thread_pending_get(void)
|
|
|
|
{
|
2011-11-20 06:44:05 -08:00
|
|
|
int ret;
|
|
|
|
|
2012-03-29 01:52:25 -07:00
|
|
|
EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2010-07-21 01:33:25 -07:00
|
|
|
ret = eina_list_count(_ecore_pending_job_threads);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2010-07-21 01:33:25 -07:00
|
|
|
return ret;
|
2010-07-20 02:40:18 -07:00
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI int
|
2010-09-22 02:47:55 -07:00
|
|
|
ecore_thread_pending_feedback_get(void)
|
2010-07-20 02:40:18 -07:00
|
|
|
{
|
2011-11-20 06:44:05 -08:00
|
|
|
int ret;
|
|
|
|
|
2012-03-29 01:52:25 -07:00
|
|
|
EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2010-09-22 02:47:55 -07:00
|
|
|
ret = eina_list_count(_ecore_pending_job_threads_feedback);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2010-07-21 01:33:25 -07:00
|
|
|
return ret;
|
2010-07-20 02:40:18 -07:00
|
|
|
}
|
2010-07-20 18:04:28 -07:00
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI int
|
2010-07-20 21:03:40 -07:00
|
|
|
ecore_thread_pending_total_get(void)
|
|
|
|
{
|
2011-11-20 06:44:05 -08:00
|
|
|
int ret;
|
|
|
|
|
2012-03-29 01:52:25 -07:00
|
|
|
EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2010-09-22 02:47:55 -07:00
|
|
|
ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2010-07-21 01:33:25 -07:00
|
|
|
return ret;
|
2010-07-20 21:03:40 -07:00
|
|
|
}
|
2010-07-20 18:04:28 -07:00
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI int
|
2010-07-20 18:04:28 -07:00
|
|
|
ecore_thread_max_get(void)
|
|
|
|
{
|
2012-03-29 01:52:25 -07:00
|
|
|
EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
|
2010-07-20 21:26:57 -07:00
|
|
|
return _ecore_thread_count_max;
|
2010-07-20 18:04:28 -07:00
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI void
|
2010-07-20 18:04:28 -07:00
|
|
|
ecore_thread_max_set(int num)
|
|
|
|
{
|
2012-03-29 01:52:25 -07:00
|
|
|
EINA_MAIN_LOOP_CHECK_RETURN;
|
2010-07-20 18:04:28 -07:00
|
|
|
if (num < 1) return;
|
|
|
|
/* avoid doing something hilarious by blocking dumb users */
|
2017-02-04 20:23:05 -08:00
|
|
|
if (num > (32 * eina_cpu_count())) num = 32 * eina_cpu_count();
|
2010-07-20 18:04:28 -07:00
|
|
|
|
|
|
|
_ecore_thread_count_max = num;
|
|
|
|
}
|
2010-07-20 19:12:10 -07:00
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI void
|
2010-07-20 19:12:10 -07:00
|
|
|
ecore_thread_max_reset(void)
|
|
|
|
{
|
2012-06-06 03:52:45 -07:00
|
|
|
EINA_MAIN_LOOP_CHECK_RETURN;
|
2017-02-04 20:23:05 -08:00
|
|
|
_ecore_thread_count_max = eina_cpu_count() * 4;
|
2010-07-20 19:12:10 -07:00
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI int
|
2010-07-21 00:09:51 -07:00
|
|
|
ecore_thread_available_get(void)
|
2010-07-20 19:12:10 -07:00
|
|
|
{
|
2011-11-20 06:44:05 -08:00
|
|
|
int ret;
|
|
|
|
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKL(_ecore_pending_job_threads_mutex);
|
2010-07-21 01:33:25 -07:00
|
|
|
ret = _ecore_thread_count_max - _ecore_thread_count;
|
2013-10-10 02:02:00 -07:00
|
|
|
SLKU(_ecore_pending_job_threads_mutex);
|
2010-07-21 01:33:25 -07:00
|
|
|
return ret;
|
2010-07-20 19:12:10 -07:00
|
|
|
}
|
2010-07-22 13:28:34 -07:00
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI Eina_Bool
|
2011-10-20 22:40:39 -07:00
|
|
|
ecore_thread_local_data_add(Ecore_Thread *thread,
|
2018-06-20 14:12:51 -07:00
|
|
|
const char *key,
|
|
|
|
void *value,
|
|
|
|
Eina_Free_Cb cb,
|
|
|
|
Eina_Bool direct)
|
2010-07-22 13:28:34 -07:00
|
|
|
{
|
2011-10-20 22:40:39 -07:00
|
|
|
Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
|
2010-07-30 01:52:18 -07:00
|
|
|
Ecore_Thread_Data *d;
|
2010-07-23 10:30:21 -07:00
|
|
|
Eina_Bool ret;
|
|
|
|
|
2010-07-22 13:28:34 -07:00
|
|
|
if ((!thread) || (!key) || (!value))
|
|
|
|
return EINA_FALSE;
|
2012-12-31 08:14:40 -08:00
|
|
|
|
2017-09-28 18:17:57 -07:00
|
|
|
LKL(worker->mutex);
|
2010-07-23 06:08:38 -07:00
|
|
|
if (!worker->hash)
|
2010-07-30 01:52:18 -07:00
|
|
|
worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
|
2017-09-28 18:17:57 -07:00
|
|
|
LKU(worker->mutex);
|
2010-07-22 13:28:34 -07:00
|
|
|
|
2010-07-23 06:08:38 -07:00
|
|
|
if (!worker->hash)
|
2010-07-22 13:28:34 -07:00
|
|
|
return EINA_FALSE;
|
2010-07-30 01:52:18 -07:00
|
|
|
|
|
|
|
if (!(d = malloc(sizeof(Ecore_Thread_Data))))
|
|
|
|
return EINA_FALSE;
|
|
|
|
|
|
|
|
d->data = value;
|
|
|
|
d->cb = cb;
|
|
|
|
|
2017-09-28 18:17:57 -07:00
|
|
|
LKL(worker->mutex);
|
2010-07-23 10:30:21 -07:00
|
|
|
if (direct)
|
2010-07-30 01:52:18 -07:00
|
|
|
ret = eina_hash_direct_add(worker->hash, key, d);
|
2010-07-23 10:30:21 -07:00
|
|
|
else
|
2010-07-30 01:52:18 -07:00
|
|
|
ret = eina_hash_add(worker->hash, key, d);
|
2017-09-28 18:17:57 -07:00
|
|
|
LKU(worker->mutex);
|
2010-11-23 08:52:18 -08:00
|
|
|
CDB(worker->cond);
|
2010-07-23 10:30:21 -07:00
|
|
|
return ret;
|
2010-07-22 13:28:34 -07:00
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
/* Set (insert or replace) a named value in @p thread's local-data hash.
 *
 * Returns the previous value stored under @p key (ownership passes back
 * to the caller; its free callback is NOT invoked), or NULL if there was
 * none or on error.  NOTE: a NULL return cannot distinguish "no previous
 * value" from failure. */
EAPI void *
ecore_thread_local_data_set(Ecore_Thread *thread,
                            const char *key,
                            void *value,
                            Eina_Free_Cb cb)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
   Ecore_Thread_Data *d, *r;
   void *ret;

   if ((!thread) || (!key) || (!value))
     return NULL;

   /* Lazily create the per-thread hash under the worker's mutex. */
   LKL(worker->mutex);
   if (!worker->hash)
     worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
   LKU(worker->mutex);

   if (!worker->hash)
     return NULL;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return NULL;

   d->data = value;
   d->cb = cb;

   /* eina_hash_set returns the displaced entry (or NULL). */
   LKL(worker->mutex);
   r = eina_hash_set(worker->hash, key, d);
   LKU(worker->mutex);
   CDB(worker->cond);

   if (r)
     {
        /* Hand the old value back; free only the wrapper struct. */
        ret = r->data;
        free(r);
        return ret;
     }
   return NULL;
}
|
2010-07-22 20:39:52 -07:00
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI void *
|
2011-10-20 22:40:39 -07:00
|
|
|
ecore_thread_local_data_find(Ecore_Thread *thread,
|
2018-06-20 14:12:51 -07:00
|
|
|
const char *key)
|
2010-07-22 13:28:34 -07:00
|
|
|
{
|
2011-10-20 22:40:39 -07:00
|
|
|
Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
|
2010-07-30 01:52:18 -07:00
|
|
|
Ecore_Thread_Data *d;
|
|
|
|
|
2010-07-22 13:28:34 -07:00
|
|
|
if ((!thread) || (!key))
|
|
|
|
return NULL;
|
2012-12-31 08:14:40 -08:00
|
|
|
|
2010-07-23 06:08:38 -07:00
|
|
|
if (!worker->hash)
|
2010-07-22 13:28:34 -07:00
|
|
|
return NULL;
|
|
|
|
|
2017-09-28 18:17:57 -07:00
|
|
|
LKL(worker->mutex);
|
2010-07-30 01:52:18 -07:00
|
|
|
d = eina_hash_find(worker->hash, key);
|
2017-09-28 18:17:57 -07:00
|
|
|
LKU(worker->mutex);
|
2011-07-12 06:36:19 -07:00
|
|
|
if (d)
|
|
|
|
return d->data;
|
|
|
|
return NULL;
|
2010-07-22 13:28:34 -07:00
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI Eina_Bool
|
2011-10-20 22:40:39 -07:00
|
|
|
ecore_thread_local_data_del(Ecore_Thread *thread,
|
2018-06-20 14:12:51 -07:00
|
|
|
const char *key)
|
2010-07-22 13:28:34 -07:00
|
|
|
{
|
2011-10-20 22:40:39 -07:00
|
|
|
Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
|
2017-09-28 18:17:57 -07:00
|
|
|
Eina_Bool r;
|
2011-11-20 06:44:05 -08:00
|
|
|
|
2010-07-22 13:28:34 -07:00
|
|
|
if ((!thread) || (!key))
|
|
|
|
return EINA_FALSE;
|
2012-12-31 08:14:40 -08:00
|
|
|
|
2010-07-23 06:08:38 -07:00
|
|
|
if (!worker->hash)
|
2010-07-22 13:28:34 -07:00
|
|
|
return EINA_FALSE;
|
2017-09-28 18:17:57 -07:00
|
|
|
|
|
|
|
LKL(worker->mutex);
|
|
|
|
r = eina_hash_del_by_key(worker->hash, key);
|
|
|
|
LKU(worker->mutex);
|
|
|
|
return r;
|
2010-07-22 13:28:34 -07:00
|
|
|
}
|
2010-07-23 06:12:42 -07:00
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI Eina_Bool
|
2018-06-20 14:12:51 -07:00
|
|
|
ecore_thread_global_data_add(const char *key,
|
|
|
|
void *value,
|
2011-10-20 22:40:39 -07:00
|
|
|
Eina_Free_Cb cb,
|
2018-06-20 14:12:51 -07:00
|
|
|
Eina_Bool direct)
|
2010-07-23 08:33:22 -07:00
|
|
|
{
|
2010-07-30 01:52:18 -07:00
|
|
|
Ecore_Thread_Data *d;
|
2011-11-20 06:44:05 -08:00
|
|
|
Eina_Bool ret;
|
2010-07-30 01:52:18 -07:00
|
|
|
|
2010-07-23 08:33:22 -07:00
|
|
|
if ((!key) || (!value))
|
|
|
|
return EINA_FALSE;
|
2012-12-31 08:14:40 -08:00
|
|
|
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKWL(_ecore_thread_global_hash_lock);
|
2010-07-23 08:33:22 -07:00
|
|
|
if (!_ecore_thread_global_hash)
|
2010-07-30 01:52:18 -07:00
|
|
|
_ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKU(_ecore_thread_global_hash_lock);
|
2010-07-23 08:33:22 -07:00
|
|
|
|
2010-07-30 01:52:18 -07:00
|
|
|
if (!(d = malloc(sizeof(Ecore_Thread_Data))))
|
|
|
|
return EINA_FALSE;
|
|
|
|
|
|
|
|
d->data = value;
|
|
|
|
d->cb = cb;
|
|
|
|
|
2010-07-23 08:33:22 -07:00
|
|
|
if (!_ecore_thread_global_hash)
|
2013-07-08 04:54:42 -07:00
|
|
|
{
|
|
|
|
free(d);
|
|
|
|
return EINA_FALSE;
|
|
|
|
}
|
|
|
|
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKWL(_ecore_thread_global_hash_lock);
|
2010-07-23 08:33:22 -07:00
|
|
|
if (direct)
|
2010-07-30 01:52:18 -07:00
|
|
|
ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
|
2010-07-23 08:33:22 -07:00
|
|
|
else
|
2010-07-30 01:52:18 -07:00
|
|
|
ret = eina_hash_add(_ecore_thread_global_hash, key, d);
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKU(_ecore_thread_global_hash_lock);
|
|
|
|
CDB(_ecore_thread_global_hash_cond);
|
2010-07-23 08:33:22 -07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
/* Set (insert or replace) a named value in the process-wide thread data
 * hash.  Returns the previous value stored under @p key (ownership passes
 * back to the caller; its free callback is NOT invoked), or NULL if there
 * was none or on error.  NOTE: a NULL return cannot distinguish "no
 * previous value" from failure. */
EAPI void *
ecore_thread_global_data_set(const char *key,
                             void *value,
                             Eina_Free_Cb cb)
{
   Ecore_Thread_Data *d, *r;
   void *ret;

   if ((!key) || (!value))
     return NULL;

   /* Lazily create the global hash under the write lock. */
   LRWKWL(_ecore_thread_global_hash_lock);
   if (!_ecore_thread_global_hash)
     _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
   LRWKU(_ecore_thread_global_hash_lock);

   if (!_ecore_thread_global_hash)
     return NULL;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return NULL;

   d->data = value;
   d->cb = cb;

   /* eina_hash_set returns the displaced entry (or NULL). */
   LRWKWL(_ecore_thread_global_hash_lock);
   r = eina_hash_set(_ecore_thread_global_hash, key, d);
   LRWKU(_ecore_thread_global_hash_lock);
   CDB(_ecore_thread_global_hash_cond);

   if (r)
     {
        /* Hand the old value back; free only the wrapper struct. */
        ret = r->data;
        free(r);
        return ret;
     }
   return NULL;
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI void *
|
2010-07-23 08:33:22 -07:00
|
|
|
ecore_thread_global_data_find(const char *key)
|
|
|
|
{
|
2010-07-30 01:52:18 -07:00
|
|
|
Ecore_Thread_Data *ret;
|
2011-11-20 06:44:05 -08:00
|
|
|
|
2010-07-23 08:33:22 -07:00
|
|
|
if (!key)
|
|
|
|
return NULL;
|
2012-12-31 08:14:40 -08:00
|
|
|
|
2010-07-23 08:33:22 -07:00
|
|
|
if (!_ecore_thread_global_hash) return NULL;
|
|
|
|
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKRL(_ecore_thread_global_hash_lock);
|
2010-07-23 08:33:22 -07:00
|
|
|
ret = eina_hash_find(_ecore_thread_global_hash, key);
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKU(_ecore_thread_global_hash_lock);
|
2011-07-12 06:36:19 -07:00
|
|
|
if (ret)
|
|
|
|
return ret->data;
|
|
|
|
return NULL;
|
2010-07-23 08:33:22 -07:00
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI Eina_Bool
|
2010-07-23 08:33:22 -07:00
|
|
|
ecore_thread_global_data_del(const char *key)
|
|
|
|
{
|
|
|
|
Eina_Bool ret;
|
2010-07-30 01:52:18 -07:00
|
|
|
|
2010-07-23 08:33:22 -07:00
|
|
|
if (!key)
|
|
|
|
return EINA_FALSE;
|
2012-12-31 08:14:40 -08:00
|
|
|
|
2010-07-23 08:33:22 -07:00
|
|
|
if (!_ecore_thread_global_hash)
|
|
|
|
return EINA_FALSE;
|
|
|
|
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKWL(_ecore_thread_global_hash_lock);
|
2010-07-23 08:33:22 -07:00
|
|
|
ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKU(_ecore_thread_global_hash_lock);
|
2010-07-23 08:33:22 -07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-05-26 07:44:41 -07:00
|
|
|
EAPI void *
|
2011-10-20 22:40:39 -07:00
|
|
|
ecore_thread_global_data_wait(const char *key,
|
2018-06-20 14:12:51 -07:00
|
|
|
double seconds)
|
2010-07-23 09:24:35 -07:00
|
|
|
{
|
2011-05-26 19:06:26 -07:00
|
|
|
double tm = 0;
|
2010-07-30 01:52:18 -07:00
|
|
|
Ecore_Thread_Data *ret = NULL;
|
2011-05-26 19:06:26 -07:00
|
|
|
|
2010-07-23 09:24:35 -07:00
|
|
|
if (!key)
|
|
|
|
return NULL;
|
2012-12-31 08:14:40 -08:00
|
|
|
|
2010-07-23 09:24:35 -07:00
|
|
|
if (seconds > 0)
|
2011-05-26 19:06:26 -07:00
|
|
|
tm = ecore_time_get() + seconds;
|
2010-07-23 09:24:35 -07:00
|
|
|
|
|
|
|
while (1)
|
|
|
|
{
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKRL(_ecore_thread_global_hash_lock);
|
2014-05-15 05:23:51 -07:00
|
|
|
if (_ecore_thread_global_hash)
|
|
|
|
ret = eina_hash_find(_ecore_thread_global_hash, key);
|
2010-11-23 08:52:18 -08:00
|
|
|
LRWKU(_ecore_thread_global_hash_lock);
|
2016-12-19 16:32:20 -08:00
|
|
|
if ((ret) ||
|
2017-03-21 11:11:19 -07:00
|
|
|
(!EINA_DBL_EQ(seconds, 0.0)) ||
|
2016-12-19 16:32:20 -08:00
|
|
|
((seconds > 0) && (tm <= ecore_time_get())))
|
2010-07-23 09:24:35 -07:00
|
|
|
break;
|
2010-11-23 08:52:18 -08:00
|
|
|
LKL(_ecore_thread_global_hash_mutex);
|
2015-10-14 10:57:53 -07:00
|
|
|
CDW(_ecore_thread_global_hash_cond, tm - ecore_time_get());
|
2010-11-23 08:52:18 -08:00
|
|
|
LKU(_ecore_thread_global_hash_mutex);
|
2010-07-23 09:24:35 -07:00
|
|
|
}
|
2010-07-30 01:52:18 -07:00
|
|
|
if (ret) return ret->data;
|
|
|
|
return NULL;
|
2010-07-23 09:24:35 -07:00
|
|
|
}
|
2018-06-20 14:12:51 -07:00
|
|
|
|