fix ecore-thread scheduler starvation issue.

SVN revision: 71404
Carsten Haitzler 2012-05-24 09:51:17 +00:00
parent 01e40c8253
commit 4bf005eede
3 changed files with 73 additions and 77 deletions

ChangeLog

@@ -671,4 +671,7 @@
         accessing already deleted ecore-con clients. use client
         ref/unref to fix it. No backport of this fix as it requires a
         new feature.
+        * Fix ecore-thread scheduling issue where a re-scheduled job
+          would keep a worker busy and never let feedback workers run;
+          scheduling is now fairer.
+        * Allow 16 * cpu num worker threads (the default is still cpu num).
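For context (not part of the commit): the starvation pattern being fixed can be sketched roughly as below — a short job that keeps re-queueing itself with ecore_thread_reschedule() while a feedback job waits on the same pool. Only the Ecore_Thread/Eina calls are the real public API; the callback names, the countdown, and the forced one-worker pool are hypothetical and exist just to make the contention visible.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <Ecore.h>

static int _jobs_left = 2;

/* Hypothetical short job: re-queues itself via ecore_thread_reschedule(),
 * the pattern that could previously monopolize a worker thread. */
static void
_busy_blocking(void *data, Ecore_Thread *th)
{
   int *countdown = data;

   if (--(*countdown) > 0) ecore_thread_reschedule(th);
}

/* Hypothetical heavy job that risked never starting before the fix. */
static void
_heavy(void *data, Ecore_Thread *th)
{
   (void) data;
   ecore_thread_feedback(th, strdup("heavy job got to run"));
}

static void
_notify(void *data, Ecore_Thread *th, void *msg)
{
   (void) data; (void) th;
   printf("%s\n", (char *) msg);
   free(msg);
}

static void
_done(void *data, Ecore_Thread *th)
{
   (void) data; (void) th;
   if (--_jobs_left == 0) ecore_main_loop_quit();
}

int
main(void)
{
   static int countdown = 100000;

   ecore_init();
   ecore_thread_max_set(1); /* one worker makes the contention easy to see */
   ecore_thread_run(_busy_blocking, _done, _done, &countdown);
   ecore_thread_feedback_run(_heavy, _notify, _done, _done, NULL, EINA_FALSE);
   ecore_main_loop_begin();
   ecore_shutdown();
   return 0;
}

With a single worker, the pre-fix code would keep re-running the rescheduled short job in a tight loop before _heavy ever started; after this commit the worker takes one job at a time and alternates between the short and feedback queues.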

Ecore.h

@@ -2120,7 +2120,7 @@ EAPI int ecore_thread_max_get(void);
  * @param num The new maximum
  *
  * This sets a new value for the maximum number of concurrently running
- * Ecore_Thread's. It @b must be an integer between 1 and (2 * @c x), where @c x
+ * Ecore_Thread's. It @b must be an integer between 1 and (16 * @c x), where @c x
  * is the number of CPUs available.
  *
  * @see ecore_thread_max_get()
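As a usage note (not in the diff): a minimal sketch of the documented call, assuming only the public ecore_thread_max_get()/ecore_thread_max_set() API and eina_cpu_count(). It must run on the main-loop thread, which main() is.

#include <stdio.h>
#include <Ecore.h>

int
main(void)
{
   ecore_init();

   /* The default maximum is the CPU count. */
   printf("default max: %i\n", ecore_thread_max_get());

   /* Any value in [1, 16 * CPUs) is accepted as-is. */
   ecore_thread_max_set(4 * eina_cpu_count());
   printf("raised max: %i\n", ecore_thread_max_get());

   ecore_shutdown();
   return 0;
}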

ecore_thread.c

@@ -1,3 +1,4 @@
 #ifdef HAVE_CONFIG_H
 # include <config.h>
 #endif
@@ -398,44 +399,40 @@ static void
 _ecore_short_job(PH(thread))
 {
    Ecore_Pthread_Worker *work;
+   int cancel;

-   while (_ecore_pending_job_threads)
+   LKL(_ecore_pending_job_threads_mutex);
+   if (!_ecore_pending_job_threads)
     {
-        int cancel;
-
-        LKL(_ecore_pending_job_threads_mutex);
-        if (!_ecore_pending_job_threads)
-          {
-             LKU(_ecore_pending_job_threads_mutex);
-             break;
-          }
-
-        work = eina_list_data_get(_ecore_pending_job_threads);
-        _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
-                                                           _ecore_pending_job_threads);
-
-        LKU(_ecore_pending_job_threads_mutex);
-
-        LKL(work->cancel_mutex);
-        cancel = work->cancel;
-        LKU(work->cancel_mutex);
-        work->self = thread;
-        if (!cancel)
-          work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);
-
-        if (work->reschedule)
-          {
-             work->reschedule = EINA_FALSE;
-
-             LKL(_ecore_pending_job_threads_mutex);
-             _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
-             LKU(_ecore_pending_job_threads_mutex);
-          }
-        else
-          {
-             ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
-          }
+        LKU(_ecore_pending_job_threads_mutex);
+        return;
     }
+
+   work = eina_list_data_get(_ecore_pending_job_threads);
+   _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
+                                                      _ecore_pending_job_threads);
+   LKU(_ecore_pending_job_threads_mutex);
+
+   LKL(work->cancel_mutex);
+   cancel = work->cancel;
+   LKU(work->cancel_mutex);
+   work->self = thread;
+   if (!cancel)
+     work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);
+
+   if (work->reschedule)
+     {
+        work->reschedule = EINA_FALSE;
+
+        LKL(_ecore_pending_job_threads_mutex);
+        _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
+        LKU(_ecore_pending_job_threads_mutex);
+     }
+   else
+     {
+        ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
+     }
 }
@@ -443,44 +440,40 @@ static void
 _ecore_feedback_job(PH(thread))
 {
    Ecore_Pthread_Worker *work;
+   int cancel;

-   while (_ecore_pending_job_threads_feedback)
+   LKL(_ecore_pending_job_threads_mutex);
+   if (!_ecore_pending_job_threads_feedback)
     {
-        int cancel;
-
-        LKL(_ecore_pending_job_threads_mutex);
-        if (!_ecore_pending_job_threads_feedback)
-          {
-             LKU(_ecore_pending_job_threads_mutex);
-             break;
-          }
-
-        work = eina_list_data_get(_ecore_pending_job_threads_feedback);
-        _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
-                                                                    _ecore_pending_job_threads_feedback);
-
-        LKU(_ecore_pending_job_threads_mutex);
-
-        LKL(work->cancel_mutex);
-        cancel = work->cancel;
-        LKU(work->cancel_mutex);
-        work->self = thread;
-        if (!cancel)
-          work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
-
-        if (work->reschedule)
-          {
-             work->reschedule = EINA_FALSE;
-
-             LKL(_ecore_pending_job_threads_mutex);
-             _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
-             LKU(_ecore_pending_job_threads_mutex);
-          }
-        else
-          {
-             ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
-          }
+        LKU(_ecore_pending_job_threads_mutex);
+        return;
     }
+
+   work = eina_list_data_get(_ecore_pending_job_threads_feedback);
+   _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
+                                                               _ecore_pending_job_threads_feedback);
+   LKU(_ecore_pending_job_threads_mutex);
+
+   LKL(work->cancel_mutex);
+   cancel = work->cancel;
+   LKU(work->cancel_mutex);
+   work->self = thread;
+   if (!cancel)
+     work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
+
+   if (work->reschedule)
+     {
+        work->reschedule = EINA_FALSE;
+
+        LKL(_ecore_pending_job_threads_mutex);
+        _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
+        LKU(_ecore_pending_job_threads_mutex);
+     }
+   else
+     {
+        ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
+     }
 }
@@ -519,8 +512,8 @@ _ecore_thread_worker(void *data __UNUSED__)
    eina_sched_prio_drop();

 restart:
-   if (_ecore_pending_job_threads) _ecore_short_job(PHS());
-   if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(PHS());
+   _ecore_short_job(PHS());
+   _ecore_feedback_job(PHS());

    /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
@@ -536,7 +529,7 @@ restart:
 #ifdef _WIN32
    Sleep(1); /* around 50ms */
 #else
-   usleep(200);
+   usleep(50);
 #endif

    LKL(_ecore_pending_job_threads_mutex);
@@ -1188,7 +1181,7 @@ ecore_thread_max_set(int num)
    EINA_MAIN_LOOP_CHECK_RETURN;
    if (num < 1) return;
    /* avoid doing something hilarious by blocking dumb users */
-   if (num >= (2 * eina_cpu_count())) return;
+   if (num >= (16 * eina_cpu_count())) num = 16 * eina_cpu_count();

    _ecore_thread_count_max = num;
 }
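A closing note on the new bound (a sketch, assuming the clamped semantics of the hunk above, not code from the commit): out-of-range requests are now pinned to the ceiling instead of being silently ignored as the old 2 * CPUs check did.

/* Before this commit: a request at or above 2 * CPUs was dropped and the
 * old maximum kept. After: the value is clamped, so the pool maximum
 * becomes 16 * eina_cpu_count(). */
ecore_thread_max_set(100000);
printf("clamped max: %i\n", ecore_thread_max_get());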