* evas: Complete rewrite of evas image preload code.

This code should be cleaner and easier to understand. It also provides
	the ability to spread image decompression across all CPU cores. I currently
	set it to the exact number of CPU cores in your machine; if you
	find cases where it slows down your EFL apps too much, we can reduce this
	to leave at least one core free for evas.
	   All previous bugs related to async preload are gone; hopefully no
	new ones have been introduced. Please report any problem, with a backtrace, to me.


SVN revision: 44537
This commit is contained in:
Cedric BAIL 2009-12-18 11:03:31 +00:00
parent fe0f51ab21
commit b0ffd95878
8 changed files with 514 additions and 439 deletions

View File

@ -15,7 +15,8 @@ AM_CPPFLAGS = -I. \
noinst_LTLIBRARIES = libevas_cache.la
libevas_cache_la_SOURCES = \
evas_cache_image.c \
evas_cache_engine_image.c
evas_cache_engine_image.c \
evas_preload.c
libevas_cache_la_LIBAD = @EVIL_LIBS@

View File

@ -46,6 +46,9 @@ struct _Evas_Cache_Image
{
Evas_Cache_Image_Func func;
Eina_List *preload;
Eina_List *pending;
Eina_Inlist *dirty;
Eina_Inlist *lru;
@ -111,7 +114,6 @@ extern "C" {
EAPI Evas_Cache_Image* evas_cache_image_init(const Evas_Cache_Image_Func *cb);
EAPI void evas_cache_image_shutdown(Evas_Cache_Image *cache);
EAPI Image_Entry* evas_cache_image_request(Evas_Cache_Image *cache, const char *file, const char *key, RGBA_Image_Loadopts *lo, int *error);
EAPI void evas_cache_pending_process(void);
EAPI void evas_cache_image_drop(Image_Entry *im);
EAPI void evas_cache_image_data_not_needed(Image_Entry *im);
EAPI int evas_cache_image_flush(Evas_Cache_Image *cache);
@ -157,6 +159,8 @@ EAPI void evas_cache_engine_image_load_data(Engine_Image_Ent
EAPI void evas_cache_image_preload_data(Image_Entry *im, const void *target);
EAPI void evas_cache_image_preload_cancel(Image_Entry *im, const void *target);
EAPI void evas_cache_image_wakeup(void);
#ifdef __cplusplus
}
#endif

View File

@ -24,8 +24,6 @@
#endif
#ifdef BUILD_ASYNC_PRELOAD
#include <pthread.h>
typedef struct _Evas_Cache_Preload Evas_Cache_Preload;
struct _Evas_Cache_Preload
@ -34,23 +32,12 @@ struct _Evas_Cache_Preload
Image_Entry *ie;
};
static Eina_Inlist *preload = NULL;
static Image_Entry *current = NULL;
static Eina_List *pending = NULL;
static LK(engine_lock) = PTHREAD_MUTEX_INITIALIZER;
static LK(wakeup) = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond_done = PTHREAD_COND_INITIALIZER;
static pthread_cond_t cond_new = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t mutex_new = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mutex_pending = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mutex_surface_alloc = PTHREAD_MUTEX_INITIALIZER;
static pthread_t tid = 0;
static pthread_cond_t cond_wakeup = PTHREAD_COND_INITIALIZER;
static Eina_Bool running = EINA_FALSE;
static void *_evas_cache_background_load(void *);
static void _evas_cache_image_entry_clear_preloaders(Image_Entry *ie);
static int _evas_cache_image_entry_preload_remove(Image_Entry *ie, const void *target);
static void _evas_cache_image_entry_preload_remove(Image_Entry *ie, const void *target);
#endif
#define FREESTRC(Var) \
@ -84,16 +71,7 @@ _evas_cache_image_make_activ(Evas_Cache_Image *cache,
Image_Entry *im,
const char *key)
{
#ifdef BUILD_ASYNC_PRELOAD
pthread_mutex_lock(&mutex);
if (im->flags.pending)
{
im->flags.pending = 0;
pending = eina_list_remove(pending, im);
}
pthread_mutex_unlock(&mutex);
#endif
/* FIXME: Handle case when image is being processed anyway and don't do a double decode. */
im->cache_key = key;
if (key != NULL)
{
@ -189,8 +167,17 @@ _evas_cache_image_entry_delete(Evas_Cache_Image *cache, Image_Entry *ie)
if (cache->func.debug)
cache->func.debug("deleting", ie);
if (ie->flags.delete_me == 1)
return ;
#ifdef BUILD_ASYNC_PRELOAD
_evas_cache_image_entry_preload_remove(ie, NULL);
if (ie->preload)
{
ie->flags.delete_me = 1;
_evas_cache_image_entry_preload_remove(ie, NULL);
return ;
}
#endif
cache->func.destructor(ie);
@ -268,6 +255,8 @@ _evas_cache_image_entry_new(Evas_Cache_Image *cache,
#ifdef BUILD_ASYNC_PRELOAD
LKI(ie->lock);
ie->targets = NULL;
ie->preload = NULL;
ie->flags.delete_me = 0;
#endif
if (lo)
@ -322,52 +311,97 @@ _evas_cache_image_entry_surface_alloc(Evas_Cache_Image *cache,
hmin = h > 0 ? h : 1;
#ifdef BUILD_ASYNC_PRELOAD
pthread_mutex_lock(&mutex_surface_alloc);
LKL(engine_lock);
#endif
_evas_cache_image_entry_surface_alloc__locked(cache, ie, wmin, hmin);
#ifdef BUILD_ASYNC_PRELOAD
pthread_mutex_unlock(&mutex_surface_alloc);
LKU(engine_lock);
#endif
}
#ifdef BUILD_ASYNC_PRELOAD
static void
_evas_cache_image_async_call_process(void *obj, Evas_Callback_Type type, void *data)
_evas_cache_image_async_heavy(void *data)
{
Image_Entry *ie = (Image_Entry *) obj;
Evas_Cache_Image *cache;
Image_Entry *current;
int error;
int pchannel;
ie->flags.in_pipe = 0;
current = data;
LKL(current->lock);
pchannel = current->channel;
current->channel++;
cache = current->cache;
if (!current->flags.loaded)
{
error = cache->func.load(current);
if (cache->func.debug)
cache->func.debug("load", current);
if (error)
{
current->flags.loaded = 0;
_evas_cache_image_entry_surface_alloc(cache, current,
current->w, current->h);
}
else
current->flags.loaded = 1;
}
current->channel = pchannel;
LKU(current->lock);
}
static void
_evas_cache_image_async_end(void *data)
{
Image_Entry *ie = (Image_Entry *) data;
Evas_Cache_Target *tmp;
ie->cache->preload = eina_list_remove(ie->cache->preload, ie);
ie->cache->pending = eina_list_remove(ie->cache->pending, ie);
ie->preload = NULL;
ie->flags.preload_done = 1;
while (ie->targets)
{
Evas_Cache_Target *tmp = ie->targets;
tmp = ie->targets;
evas_object_event_callback_call((Evas_Object*) tmp->target, EVAS_CALLBACK_IMAGE_PRELOADED, NULL);
evas_object_inform_call_image_preloaded((Evas_Object*) tmp->target);
ie->targets = (Evas_Cache_Target*) eina_inlist_remove(EINA_INLIST_GET(ie->targets), EINA_INLIST_GET(ie->targets));
free(tmp);
}
}
static void
_evas_cache_image_entry_clear_preloaders(Image_Entry *ie)
_evas_cache_image_async_cancel(void *data)
{
ie->flags.preload = 0;
ie->flags.in_pipe = 0;
evas_async_target_del(ie);
}
Image_Entry *ie = (Image_Entry *) data;
static void
_evas_cache_image_async_call__locked(Image_Entry *im)
{
evas_async_events_put(im, EVAS_CALLBACK_IMAGE_PRELOADED, NULL,
_evas_cache_image_async_call_process);
}
ie->preload = NULL;
ie->cache->pending = eina_list_remove(ie->cache->pending, ie);
static void
_evas_cache_image_async_call(Image_Entry *im)
{
pthread_mutex_lock(&mutex);
_evas_cache_image_async_call__locked(im);
pthread_mutex_unlock(&mutex);
if (ie->flags.delete_me || ie->flags.dirty)
{
ie->flags.delete_me = 0;
_evas_cache_image_entry_delete(ie->cache, ie);
return ;
}
if (ie->flags.loaded)
{
_evas_cache_image_async_end(ie);
}
if (ie->references == 0)
{
_evas_cache_image_remove_activ(ie->cache, ie);
_evas_cache_image_make_inactiv(ie->cache, ie, ie->cache_key);
evas_cache_image_flush(ie->cache);
}
}
static int
@ -375,66 +409,32 @@ _evas_cache_image_entry_preload_add(Image_Entry *ie,
const void *target)
{
Evas_Cache_Target *tg;
int ret = 0;
pthread_mutex_lock(&mutex);
if (ie->flags.preload_done) return 0;
if (!ie->flags.loaded)
{
tg = malloc(sizeof (Evas_Cache_Target));
if (!tg) goto end;
tg = malloc(sizeof (Evas_Cache_Target));
if (!tg) return 0;
tg->target = target;
ie->targets = (Evas_Cache_Target*) eina_inlist_append(EINA_INLIST_GET(ie->targets), EINA_INLIST_GET(tg));
tg->target = target;
if (!ie->flags.preload)
{
Evas_Cache_Preload *tmp;
ie->targets = (Evas_Cache_Target*) eina_inlist_append(EINA_INLIST_GET(ie->targets), EINA_INLIST_GET(tg));
tmp = malloc(sizeof (Evas_Cache_Preload));
if (!tmp) goto end;
if (!ie->preload) {
ie->cache->preload = eina_list_append(ie->cache->preload, ie);
ie->flags.pending = 0;
tmp->ie = ie;
preload = eina_inlist_append(preload, EINA_INLIST_GET(tmp));
ie->preload = evas_preload_thread_run(_evas_cache_image_async_heavy,
_evas_cache_image_async_end,
_evas_cache_image_async_cancel,
ie);
}
ie->flags.preload = 1;
if (ie->flags.pending)
{
ie->flags.pending = 0;
pending = eina_list_remove(pending, ie);
}
if (!running)
{
if (tid)
{
running = EINA_TRUE;
pthread_cond_signal(&cond_new);
}
else
{
if (pthread_create(&tid, NULL, _evas_cache_background_load, NULL) == 0)
running = EINA_TRUE;
}
}
ret = 2;
}
ret = 1;
}
end:
pthread_mutex_unlock(&mutex);
return ret;
return 1;
}
static int
static void
_evas_cache_image_entry_preload_remove(Image_Entry *ie, const void *target)
{
int ret = 0;
pthread_mutex_lock(&mutex);
if (target)
{
Evas_Cache_Target *tg;
@ -450,140 +450,28 @@ _evas_cache_image_entry_preload_remove(Image_Entry *ie, const void *target)
}
}
}
if (ie->flags.in_pipe)
else
{
if (!ie->targets)
_evas_cache_image_entry_clear_preloaders(ie);
}
else if (ie->flags.preload)
{
if (current == ie)
Evas_Cache_Target *tg;
while (ie->targets)
{
// dont wait. simply handle "ie->flags.preload" nicely
// /* Wait until ie is processed. */
// pthread_cond_wait(&cond_done, &mutex);
}
else
{
Evas_Cache_Preload *l;
tg = ie->targets;
EINA_INLIST_FOREACH(preload, l)
{
if (l->ie == ie)
{
if (target)
{
// FIXME: No callback when we cancel only for one target ?
if (!ie->targets)
_evas_cache_image_entry_clear_preloaders(ie);
}
else
{
_evas_cache_image_async_call__locked(ie);
}
if (!ie->targets)
{
ie->flags.preload = 0;
preload = eina_inlist_remove(preload,
EINA_INLIST_GET(l));
free(l);
}
break;
}
}
ret = 1;
ie->targets = (Evas_Cache_Target*) eina_inlist_remove(EINA_INLIST_GET(ie->targets), EINA_INLIST_GET(tg));
free(tg);
}
}
pthread_mutex_unlock(&mutex);
return ret;
}
static void*
_evas_cache_background_load(void *data)
{
(void) data;
restart:
while (preload)
if (ie->targets == NULL && ie->preload && !ie->flags.pending)
{
pthread_mutex_lock(&mutex);
if (preload)
{
Evas_Cache_Preload *tmp = (Evas_Cache_Preload*) preload;
current = tmp->ie;
preload = eina_inlist_remove(preload, preload);
ie->cache->preload = eina_list_remove(ie->cache->preload, ie);
ie->cache->pending = eina_list_append(ie->cache->pending, ie);
free(tmp);
}
ie->flags.pending = 1;
pthread_mutex_unlock(&mutex);
if (current)
{
Evas_Cache_Image *cache;
int error;
int pchannel;
LKL(current->lock);
pchannel = current->channel;
current->channel++;
cache = current->cache;
if (!current->flags.loaded)
{
error = cache->func.load(current);
if (cache->func.debug)
cache->func.debug("load", current);
if (error)
{
_evas_cache_image_entry_surface_alloc
(cache, current, current->w, current->h);
current->flags.loaded = 0;
}
else
current->flags.loaded = 1;
}
pthread_mutex_lock(&mutex);
pthread_mutex_lock(&mutex_pending);
current->flags.preload = 0;
current->flags.in_pipe = 1;
current->channel = pchannel;
LKU(current->lock);
pthread_mutex_unlock(&mutex_pending);
pthread_mutex_unlock(&mutex);
_evas_cache_image_async_call(current);
pthread_mutex_lock(&mutex);
current = NULL;
pthread_mutex_unlock(&mutex);
}
pthread_cond_signal(&cond_done);
evas_preload_thread_cancel(ie->preload);
}
pthread_mutex_lock(&mutex);
if (preload)
{
pthread_mutex_unlock(&mutex);
goto restart;
}
running = EINA_FALSE;
pthread_mutex_unlock(&mutex);
pthread_mutex_lock(&mutex_new);
pthread_cond_wait(&cond_new, &mutex_new);
pthread_mutex_unlock(&mutex_new);
goto restart;
return NULL;
}
#endif
@ -634,6 +522,9 @@ evas_cache_image_init(const Evas_Cache_Image_Func *cb)
new->references = 1;
new->preload = NULL;
new->pending = NULL;
return new;
}
@ -650,8 +541,8 @@ _evas_cache_image_free_cb(__UNUSED__ const Eina_Hash *hash, __UNUSED__ const voi
EAPI void
evas_cache_image_shutdown(Evas_Cache_Image *cache)
{
Image_Entry *im;
Eina_List *delete_list;
Image_Entry *im;
assert(cache != NULL);
cache->references--;
@ -660,27 +551,14 @@ evas_cache_image_shutdown(Evas_Cache_Image *cache)
return ;
#ifdef BUILD_ASYNC_PRELOAD
pthread_mutex_lock(&mutex);
Eina_Inlist *l, *l_next;
for (l = preload; l != NULL; l = l_next)
EINA_LIST_FREE(cache->preload, im)
{
Evas_Cache_Preload *tmp = (Evas_Cache_Preload *)l;
Image_Entry *ie = tmp->ie;
l_next = l->next;
if (ie->cache != cache)
continue;
preload = eina_inlist_remove(preload, l);
_evas_cache_image_entry_clear_preloaders(ie);
free(l);
/* By doing that we are protecting ourselves from destroying the image when the cache is no longer available. */
im->flags.delete_me = 1;
_evas_cache_image_entry_preload_remove(im, NULL);
}
if (current && current->cache == cache)
_evas_cache_image_entry_clear_preloaders(current);
pthread_mutex_unlock(&mutex);
evas_async_events_process();
#endif
while (cache->lru)
@ -711,6 +589,19 @@ evas_cache_image_shutdown(Evas_Cache_Image *cache)
delete_list = eina_list_remove_list(delete_list, delete_list);
}
#ifdef BUILD_ASYNC_PRELOAD
/* Now wait for all pending images to die */
while (cache->pending)
{
evas_async_events_process();
LKL(wakeup);
if (cache->pending)
pthread_cond_wait(&cond_wakeup, &wakeup);
LKU(wakeup);
}
#endif
eina_hash_free(cache->activ);
eina_hash_free(cache->inactiv);
@ -891,43 +782,6 @@ evas_cache_image_request(Evas_Cache_Image *cache, const char *file, const char *
return NULL;
}
EAPI void
evas_cache_pending_process(void)
{
Image_Entry *im;
#ifdef BUILD_ASYNC_PRELOAD
pthread_mutex_lock(&mutex_pending);
EINA_LIST_FREE(pending, im)
{
Evas_Cache_Image *cache = im->cache;
if (!im->flags.pending) continue;
im->flags.pending = 0;
if (im->flags.preload) continue;
if (im->flags.activ)
{
if (im->flags.dirty)
{
_evas_cache_image_entry_delete(cache, im);
}
else
{
_evas_cache_image_remove_activ(cache, im);
_evas_cache_image_make_inactiv(cache, im, im->cache_key);
evas_cache_image_flush(cache);
}
}
else if (im->flags.cached)
{
evas_cache_image_flush(cache);
}
}
pthread_mutex_unlock(&mutex_pending);
#endif
}
EAPI void
evas_cache_image_drop(Image_Entry *im)
{
@ -942,23 +796,10 @@ evas_cache_image_drop(Image_Entry *im)
if (im->references == 0)
{
#ifdef BUILD_ASYNC_PRELOAD
if (!im->flags.pending)
if (im->preload)
{
pthread_mutex_lock(&mutex);
if (im->flags.preload || im->flags.in_pipe)
{
pthread_mutex_unlock(&mutex);
_evas_cache_image_entry_preload_remove(im, NULL);
pthread_mutex_lock(&mutex_pending);
if (!im->flags.pending)
{
im->flags.pending = 1;
pending = eina_list_append(pending, im);
}
pthread_mutex_unlock(&mutex_pending);
return;
}
pthread_mutex_unlock(&mutex);
_evas_cache_image_entry_preload_remove(im, NULL);
return ;
}
#endif
@ -967,7 +808,7 @@ evas_cache_image_drop(Image_Entry *im)
_evas_cache_image_entry_delete(cache, im);
return;
}
_evas_cache_image_remove_activ(cache, im);
_evas_cache_image_make_inactiv(cache, im, im->cache_key);
evas_cache_image_flush(cache);
@ -1242,8 +1083,11 @@ evas_cache_image_size_set(Image_Entry *im, int w, int h)
EAPI void
evas_cache_image_load_data(Image_Entry *im)
{
Evas_Cache_Image *cache;
int error, preload;
#ifdef BUILD_ASYNC_PRELOAD
Eina_Bool preload = EINA_FALSE;
#endif
Evas_Cache_Image *cache;
int error;
assert(im);
assert(im->cache);
@ -1253,20 +1097,36 @@ evas_cache_image_load_data(Image_Entry *im)
{
return;
}
#ifdef BUILD_ASYNC_PRELOAD
pthread_mutex_lock(&mutex);
preload = im->flags.preload;
/* We check a first time, to prevent useless lock. */
if (preload)
if (im->preload)
{
if (current == im)
{
/* Wait until ie is processed. */
pthread_cond_wait(&cond_done, &mutex);
}
preload = EINA_TRUE;
if (!im->flags.pending)
{
im->cache->preload = eina_list_remove(im->cache->preload, im);
im->cache->pending = eina_list_append(im->cache->pending, im);
im->flags.pending = 1;
evas_preload_thread_cancel(im->preload);
}
evas_async_events_process();
LKL(wakeup);
while (im->preload)
{
pthread_cond_wait(&cond_wakeup, &wakeup);
LKU(wakeup);
evas_async_events_process();
LKL(wakeup);
}
LKU(wakeup);
}
pthread_mutex_unlock(&mutex);
// _evas_cache_image_entry_preload_remove(im, NULL);
if (im->flags.loaded) return ;
LKL(im->lock);
#endif
@ -1276,21 +1136,21 @@ evas_cache_image_load_data(Image_Entry *im)
LKU(im->lock);
#endif
im->flags.loaded = 1;
if (cache->func.debug)
cache->func.debug("load", im);
#ifdef BUILD_ASYNC_PRELOAD
if (preload)
_evas_cache_image_async_call(im);
#endif
if (error)
{
_evas_cache_image_entry_surface_alloc(cache, im, im->w, im->h);
im->flags.loaded = 0;
return ;
}
im->flags.loaded = 1;
#ifdef BUILD_ASYNC_PRELOAD
if (preload)
_evas_cache_image_async_end(im);
#endif
}
EAPI void
@ -1304,18 +1164,18 @@ evas_cache_image_preload_data(Image_Entry *im, const void *target)
if (im->flags.loaded)
{
evas_object_event_callback_call((Evas_Object*) target, EVAS_CALLBACK_IMAGE_PRELOADED, NULL);
evas_object_inform_call_image_preloaded((Evas_Object*) target);
return ;
}
cache = im->cache;
if (!_evas_cache_image_entry_preload_add(im, target))
evas_object_event_callback_call((Evas_Object*) target, EVAS_CALLBACK_IMAGE_PRELOADED, NULL);
evas_object_inform_call_image_preloaded((Evas_Object*) target);
#else
evas_cache_image_load_data(im);
evas_object_event_callback_call((Evas_Object*) target, EVAS_CALLBACK_IMAGE_PRELOADED, NULL);
evas_object_inform_call_image_preloaded((Evas_Object*) target);
#endif
}
@ -1332,15 +1192,6 @@ evas_cache_image_preload_cancel(Image_Entry *im, const void *target)
if (target == NULL) return ;
_evas_cache_image_entry_preload_remove(im, target);
pthread_mutex_lock(&mutex_pending);
if (!im->flags.pending && im->flags.in_pipe)
{
im->flags.pending = 1;
pending = eina_list_append(pending, im);
}
pthread_mutex_unlock(&mutex_pending);
#else
(void) im;
#endif
@ -1447,3 +1298,11 @@ evas_cache_image_pixels(Image_Entry *im)
return cache->func.surface_pixels(im);
}
/* Broadcast on cond_wakeup so that threads blocked waiting for a
 * preload to finish (see the cond_wakeup waits in
 * evas_cache_image_load_data() and evas_cache_image_shutdown())
 * re-check their condition. No-op without async preload support. */
EAPI void
evas_cache_image_wakeup(void)
{
#ifdef BUILD_ASYNC_PRELOAD
pthread_cond_broadcast(&cond_wakeup);
#endif
}

268
legacy/evas/src/lib/cache/evas_preload.c vendored Normal file
View File

@ -0,0 +1,268 @@
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#ifdef HAVE_EVIL
# include <Evil.h>
#endif
#ifdef BUILD_ASYNC_PRELOAD
# include <pthread.h>
#endif
#include "evas_common.h"
#include "evas_private.h"
#include "Evas.h"
#ifdef BUILD_ASYNC_PRELOAD
typedef struct _Evas_Preload_Pthread_Worker Evas_Preload_Pthread_Worker;
typedef struct _Evas_Preload_Pthread_Data Evas_Preload_Pthread_Data;
/* One queued unit of background work: func_heavy runs in a worker
 * thread; afterwards func_end (or func_cancel when the cancel flag is
 * set) is delivered to the main loop via the async event pipe. */
struct _Evas_Preload_Pthread_Worker
{
void (*func_heavy)(void *data);
void (*func_end)(void *data);
void (*func_cancel)(void *data);
const void *data;
Eina_Bool cancel : 1;
};
/* Per-thread bookkeeping: one instance per live worker thread. */
struct _Evas_Preload_Pthread_Data
{
pthread_t thread;
};
#endif
/* Maximum number of worker threads; set from the CPU count at init. */
static int _evas_preload_thread_count_max = 0;
#ifdef BUILD_ASYNC_PRELOAD
static int _evas_preload_thread_count = 0; /* currently running workers */
static Eina_List *_evas_preload_thread_data = NULL; /* queued work, FIFO */
static Eina_List *_evas_preload_thread = NULL; /* live worker threads */
static LK(_mutex) = PTHREAD_MUTEX_INITIALIZER; /* guards the lists and counter above */
/* Reap a retired worker thread: join it and drop it from the
 * live-thread list. Runs in the main loop, dispatched as a func_end
 * callback through the async event pipe. */
static void
_evas_preload_thread_end(Evas_Preload_Pthread_Data *pth)
{
Evas_Preload_Pthread_Data *p;
/* NOTE(review): the join result `p` is never used; the worker returns
 * pth on normal exit but nothing here relies on it. */
if (pthread_join(pth->thread, (void**) &p) != 0)
return ;
_evas_preload_thread = eina_list_remove(_evas_preload_thread, pth);
}
/* Main-loop completion callback (registered with
 * evas_async_events_put): runs func_cancel when the work was
 * cancelled, func_end otherwise, then frees the worker record. */
static void
_evas_preload_thread_done(void *target, Evas_Callback_Type type, void *event_info)
{
Evas_Preload_Pthread_Worker *work;
work = event_info;
if (work->cancel)
{
/* func_cancel is optional. */
if (work->func_cancel)
work->func_cancel((void*) work->data);
}
else
{
/* NOTE(review): func_end is called unconditionally — assumes it is
 * never NULL for posted work (the thread-exit worker sets it too). */
work->func_end((void*) work->data);
}
free(work);
}
/* Worker thread main loop: drain the shared work queue, running each
 * heavy function here and posting its completion back to the main
 * loop. When the queue stays empty, the thread retires itself. */
static void *
_evas_preload_thread_worker(Evas_Preload_Pthread_Data *pth)
{
Evas_Preload_Pthread_Worker *work;
/* Allow _evas_preload_thread_shutdown() to pthread_cancel() us at any time. */
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
on_error:
while (_evas_preload_thread_data)
{
/* Re-check under the lock: another worker may have emptied the queue
 * between the unlocked test and here. */
LKL(_mutex);
if (!_evas_preload_thread_data)
{
LKU(_mutex);
break;
}
/* Pop the head of the queue. */
work = eina_list_data_get(_evas_preload_thread_data);
_evas_preload_thread_data = eina_list_remove_list(_evas_preload_thread_data, _evas_preload_thread_data);
LKU(_mutex);
/* Run the heavy part outside the lock. */
work->func_heavy((void*) work->data);
/* Hand the worker back to the main loop for func_end/func_cancel. */
evas_async_events_put(pth, 0, work, _evas_preload_thread_done);
}
/* Queue looked empty: confirm under the lock before retiring, since a
 * job may have been appended in the meantime. */
LKL(_mutex);
if (_evas_preload_thread_data)
{
LKU(_mutex);
goto on_error;
}
_evas_preload_thread_count--;
LKU(_mutex);
/* Post a synthetic worker whose func_end joins this thread from the
 * main loop (see _evas_preload_thread_end). */
work = malloc(sizeof (Evas_Preload_Pthread_Worker));
if (!work) return NULL;
work->data = pth;
work->func_heavy = NULL;
work->func_end = (void*) _evas_preload_thread_end;
work->func_cancel = NULL;
work->cancel = EINA_FALSE;
evas_async_events_put(pth, 0, work, _evas_preload_thread_done);
return pth;
}
#endif
/* Size the preload thread pool: one worker per CPU core, with a floor
 * of one worker when the core count cannot be determined. */
void
_evas_preload_thread_init(void)
{
   int cores = eina_cpu_count();

   _evas_preload_thread_count_max = (cores > 0) ? cores : 1;
}
/* Tear down the preload thread pool: flush pending completion events,
 * cancel everything still queued, then kill and join every live
 * worker thread. */
void
_evas_preload_thread_shutdown(void)
{
/* FIXME: If functions are still running in the background, should we kill them ? */
#ifdef BUILD_ASYNC_PRELOAD
Evas_Preload_Pthread_Worker *work;
Evas_Preload_Pthread_Data *pth;
/* Force processing of async events. */
evas_async_events_process();
/* Drop all queued (not yet started) work, notifying owners. */
LKL(_mutex);
EINA_LIST_FREE(_evas_preload_thread_data, work)
{
if (work->func_cancel)
work->func_cancel((void*)work->data);
free(work);
}
LKU(_mutex);
/* Forcibly terminate and reap every worker thread. */
EINA_LIST_FREE(_evas_preload_thread, pth)
{
Evas_Preload_Pthread_Data *p;
/* NOTE(review): pth itself is never freed here — looks like a small
 * one-shot leak at shutdown; confirm. */
pthread_cancel(pth->thread);
pthread_join(pth->thread, (void **) &p);
}
#endif
}
Evas_Preload_Pthread *
evas_preload_thread_run(void (*func_heavy)(void *data),
void (*func_end)(void *data),
void (*func_cancel)(void *data),
const void *data)
{
#ifdef BUILD_ASYNC_PRELOAD
Evas_Preload_Pthread_Worker *work;
Evas_Preload_Pthread_Data *pth;
work = malloc(sizeof (Evas_Preload_Pthread_Worker));
if (!work) return NULL;
work->func_heavy = func_heavy;
work->func_end = func_end;
work->func_cancel = func_cancel;
work->cancel = EINA_FALSE;
work->data = data;
LKL(_mutex);
_evas_preload_thread_data = eina_list_append(_evas_preload_thread_data, work);
if (_evas_preload_thread_count == _evas_preload_thread_count_max)
{
pthread_mutex_unlock(&_mutex);
return (Evas_Preload_Pthread*) work;
}
LKU(_mutex);
/* One more thread could be created. */
pth = malloc(sizeof (Evas_Preload_Pthread_Data));
if (!pth)
goto on_error;
if (pthread_create(&pth->thread, NULL, (void*) _evas_preload_thread_worker, pth) == 0)
{
LKL(_mutex);
_evas_preload_thread_count++;
LKU(_mutex);
return (Evas_Preload_Pthread*) work;
}
on_error:
if (_evas_preload_thread_count == 0)
{
if (work->func_cancel)
work->func_cancel((void*) work->data);
free(work);
}
return NULL;
#else
/*
If no thread and as we don't want to break app that rely on this
facility, we will lock the interface until we are done.
*/
func_heavy((void*) data);
func_end((void*) data);
return EINA_TRUE;
#endif
}
/* Cancel a job created by evas_preload_thread_run().
 * Returns EINA_TRUE when the job was still queued: it is unlinked,
 * func_cancel is invoked and the handle freed. Returns EINA_FALSE when
 * the job is already being processed: only the cancel flag is set, so
 * func_cancel (not func_end) will run when it completes. */
Eina_Bool
evas_preload_thread_cancel(Evas_Preload_Pthread *thread)
{
#ifdef BUILD_ASYNC_PRELOAD
Evas_Preload_Pthread_Worker *work;
Eina_List *l;
LKL(_mutex);
EINA_LIST_FOREACH(_evas_preload_thread_data, l, work)
if ((void*) work == (void*) thread)
{
/* Still queued: we own it, so unqueue, notify and free. */
_evas_preload_thread_data = eina_list_remove_list(_evas_preload_thread_data, l);
LKU(_mutex);
if (work->func_cancel)
work->func_cancel((void*) work->data);
free(work);
return EINA_TRUE;
}
LKU(_mutex);
/* Delay the destruction */
/* NOTE(review): the handle is dereferenced outside the lock; if the
 * job already completed (worker freed in _evas_preload_thread_done)
 * this is a use-after-free — confirm callers only cancel live
 * preloads. */
work = (Evas_Preload_Pthread_Worker *) thread;
work->cancel = EINA_TRUE;
return EINA_FALSE;
#else
return EINA_TRUE;
#endif
}

View File

@ -12,7 +12,6 @@ static int _fd_write = -1;
static int _fd_read = -1;
static int _init_evas_event = 0;
static pthread_mutex_t _mutex = PTHREAD_MUTEX_INITIALIZER;
typedef struct _Evas_Event_Async Evas_Event_Async;
@ -24,29 +23,25 @@ struct _Evas_Event_Async
Evas_Callback_Type type;
};
static int queue_num = 0;
static int queue_alloc = 0;
static Evas_Event_Async *queue = NULL;
int
evas_async_events_init(void)
{
int filedes[2];
_init_evas_event++;
if (_init_evas_event > 1) return _init_evas_event;
if (pipe(filedes) == -1)
{
_init_evas_event = 0;
return 0;
}
_fd_read = filedes[0];
_fd_write = filedes[1];
fcntl(_fd_read, F_SETFL, O_NONBLOCK);
return _init_evas_event;
}
@ -55,7 +50,7 @@ evas_async_events_shutdown(void)
{
_init_evas_event--;
if (_init_evas_event > 0) return _init_evas_event;
close(_fd_read);
close(_fd_write);
_fd_read = -1;
@ -64,36 +59,6 @@ evas_async_events_shutdown(void)
return _init_evas_event;
}
int
evas_async_target_del(const void *target)
{
int i, j, d = 0;
pthread_mutex_lock(&_mutex);
if (queue)
{
for (i = 0; i < queue_num; i++)
{
if (queue[i].target == target)
{
for (j = i + 1; j < queue_num; j++)
memcpy(&(queue[j - 1]), &(queue[j]), sizeof(Evas_Event_Async));
i--;
queue_num--;
d++;
}
}
if (queue_num == 0)
{
free(queue);
queue = NULL;
queue_alloc = 0;
}
}
pthread_mutex_unlock(&_mutex);
return d;
}
#endif
/**
@ -145,44 +110,24 @@ evas_async_events_process(void)
{
#ifdef BUILD_ASYNC_EVENTS
Evas_Event_Async *ev;
unsigned char buf[1];
int i;
int check;
int count = 0;
int myqueue_num = 0;
int myqueue_alloc = 0;
Evas_Event_Async *myqueue = NULL;
if (_fd_read == -1) return 0;
pthread_mutex_lock(&_mutex);
do
{
check = read(_fd_read, buf, 1);
}
while (check > 0);
if (queue)
{
myqueue_num = queue_num;
myqueue_alloc = queue_alloc;
myqueue = queue;
queue_num = 0;
queue_alloc = 0;
queue = NULL;
pthread_mutex_unlock(&_mutex);
for (i = 0; i < myqueue_num; i++)
{
ev = &(myqueue[i]);
do {
check = read(_fd_read, &ev, sizeof (Evas_Event_Async *));
if (check == sizeof (Evas_Event_Async *))
{
if (ev->func) ev->func((void *)ev->target, ev->type, ev->event_info);
count++;
}
free(myqueue);
}
else
pthread_mutex_unlock(&_mutex);
free(ev);
count++;
}
} while (check > 0);
evas_cache_image_wakeup();
if (check < 0)
switch (errno)
{
@ -192,8 +137,7 @@ evas_async_events_process(void)
case EISDIR:
_fd_read = -1;
}
evas_cache_pending_process();
return count;
#else
return 0;
@ -225,38 +169,21 @@ evas_async_events_put(const void *target, Evas_Callback_Type type, void *event_i
if (!func) return 0;
if (_fd_write == -1) return 0;
pthread_mutex_lock(&_mutex);
queue_num++;
if (queue_num > queue_alloc)
{
Evas_Event_Async *q2;
queue_alloc += 32; // 32 slots at a time for async events
q2 = realloc(queue, queue_alloc * sizeof(Evas_Event_Async));
if (!q2)
{
queue_alloc -= 32;
queue_num--;
pthread_mutex_unlock(&_mutex);
return 0;
}
queue = q2;
}
ev = &(queue[queue_num - 1]);
memset(ev, 0, sizeof(Evas_Event_Async));
ev = calloc(1, sizeof (Evas_Event_Async));
if (!ev) return 0;
ev->func = func;
ev->target = target;
ev->type = type;
ev->event_info = event_info;
do
{
unsigned char buf[1] = { 0xf0 };
check = write(_fd_write, buf, 1);
} while ((check != 1) && ((errno == EINTR) || (errno == EAGAIN)));
do {
check = write(_fd_write, &ev, sizeof (Evas_Event_Async*));
} while ((check != sizeof (Evas_Event_Async)) && ((errno == EINTR) || (errno == EAGAIN)));
if (check == 1)
evas_cache_image_wakeup();
if (check == sizeof (Evas_Event_Async*))
result = EINA_TRUE;
else
switch (errno)
@ -267,8 +194,7 @@ evas_async_events_put(const void *target, Evas_Callback_Type type, void *event_i
case EPIPE:
_fd_write = -1;
}
pthread_mutex_unlock(&_mutex);
return result;
#else
func(target, type, event_info);

View File

@ -39,6 +39,9 @@ evas_init(void)
#ifdef EVAS_CSERVE
if (getenv("EVAS_CSERVE")) evas_cserve_init();
#endif
#ifdef BUILD_ASYNC_PRELOAD
_evas_preload_thread_init();
#endif
return _evas_init_count;
@ -71,6 +74,9 @@ evas_shutdown(void)
if (--_evas_init_count != 0)
return _evas_init_count;
#ifdef BUILD_ASYNC_EVENTS
_evas_preload_thread_shutdown();
#endif
#ifdef EVAS_CSERVE
if (getenv("EVAS_CSERVE")) evas_cserve_shutdown();
#endif

View File

@ -335,6 +335,7 @@ typedef struct _Image_Entry Image_Entry;
typedef struct _Image_Entry_Flags Image_Entry_Flags;
typedef struct _Engine_Image_Entry Engine_Image_Entry;
typedef struct _Evas_Cache_Target Evas_Cache_Target;
typedef struct _Evas_Preload_Pthread Evas_Preload_Pthread;
typedef struct _RGBA_Image_Loadopts RGBA_Image_Loadopts;
#ifdef BUILD_PIPE_RENDER
@ -468,9 +469,9 @@ struct _Image_Entry_Flags
Eina_Bool alpha : 1;
Eina_Bool alpha_sparse : 1;
#ifdef BUILD_ASYNC_PRELOAD
Eina_Bool preload : 1;
Eina_Bool preload_done : 1;
Eina_Bool delete_me : 1;
Eina_Bool pending : 1;
Eina_Bool in_pipe : 1;
#endif
};
@ -478,6 +479,7 @@ struct _Evas_Cache_Target
{
EINA_INLIST;
const void *target;
void *data;
};
struct _Image_Entry
@ -492,6 +494,7 @@ struct _Image_Entry
const char *key;
Evas_Cache_Target *targets;
Evas_Preload_Pthread *preload;
time_t timestamp;
time_t laststat;

View File

@ -822,7 +822,15 @@ struct _Evas_Imaging_Font
int evas_async_events_init(void);
int evas_async_events_shutdown(void);
int evas_async_target_del(const void *target);
void _evas_preload_thread_init(void);
void _evas_preload_thread_shutdown(void);
Evas_Preload_Pthread *evas_preload_thread_run(void (*func_heavy)(void *data),
void (*func_end)(void *data),
void (*func_cancel)(void *data),
const void *data);
Eina_Bool evas_preload_thread_cancel(Evas_Preload_Pthread *thread);
void _evas_walk(Evas *e);
void _evas_unwalk(Evas *e);