eo - make eo id table TLS private data for thread safety and speed

This moves all the eoid tables, eoid lookup caches, generation count
information and the eo_isa cache into a TLS segment of memory that is
thread-private. There is also a shared domain for EO objects that all
threads can access, but it comes with the added cost of a lock. This
means objects created in one thread cannot be accessed by another
thread unless they are temporarily adopted into it, or were created
with the shared domain active at the time of creation. Child objects
will use their parent object's domain if created with a parent object
passed in. If you were accessing EO (EFL) objects across threads
before, this will now cause your code to fail; doing so was always
invalid, as no EFL objects were ever threadsafe, so this forces things
to "fail early".
ecore_thread_main_loop_begin() and end() still work, as they use the eo
domain adoption features to temporarily adopt a domain for the duration
of that section and then return it when done.
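
A rough usage sketch of that (obj below is a hypothetical object owned
by the main loop, not anything from this patch):

   ecore_thread_main_loop_begin();
   /* the main loop is suspended and its eo id domain is adopted by this
    * thread at this point, so main loop objects can be looked up and
    * used safely */
   efl_del(obj);
   ecore_thread_main_loop_end();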

This returns speed back to eo, bringing the lookup overhead in my tests
of the elm genlist autobounce test in elementary down from about 5-7%
to 2.5-2.6%. A steep drop.

This does not mean everything is perfect. Still to do are:

1. Tests in the test suite
2. Some APIs to help with sending objects from thread to thread
3. Make the eo call cache TLS data to make it also safe
4. Look at other locks in eo and probably move them to TLS data
5. Make the eo resolve and call wrappers that call the real method func
   do recursive mutex locking around the given object IF it is a shared
   object, to provide thread safety transparently for shared objects
   (but adding some overhead as a result)
6. Test, test, test, and that is why this commit is going in now for
   wider testing
7. Decide how to make this work with sending IPC (between threads)
8. Decide what makes an object sendable (a sendable property in base?)
9. Decide what makes an object shareable (a sharable property in base?)
Carsten Haitzler 2016-09-07 17:53:33 +09:00
parent 59bdf3461c
commit 09f19c3c73
8 changed files with 910 additions and 238 deletions

View File

@ -79,7 +79,8 @@ struct _Ecore_Safe_Call
Eina_Lock m;
Eina_Condition c;
int current_id;
Efl_Domain_Data *eo_domain_data;
int current_id;
Eina_Bool sync : 1;
Eina_Bool suspend : 1;
@ -610,6 +611,8 @@ ecore_main_loop_thread_safe_call_wait(double wait)
ecore_pipe_wait(_thread_call, 1, wait);
}
static Efl_Id_Domain _ecore_main_domain = EFL_ID_DOMAIN_INVALID;
EAPI int
ecore_thread_main_loop_begin(void)
{
@ -635,12 +638,22 @@ ecore_thread_main_loop_begin(void)
eina_lock_new(&order->m);
eina_condition_new(&order->c, &order->m);
order->suspend = EINA_TRUE;
order->eo_domain_data = NULL;
_ecore_main_loop_thread_safe_call(order);
eina_lock_take(&order->m);
while (order->current_id != _thread_id)
eina_condition_wait(&order->c);
if (order->eo_domain_data)
{
_ecore_main_domain =
efl_domain_data_adopt(order->eo_domain_data);
if (_ecore_main_domain == EFL_ID_DOMAIN_INVALID)
ERR("Cannot adopt mainloop eo domain");
}
eina_lock_release(&order->m);
eina_main_loop_define();
@ -672,6 +685,12 @@ ecore_thread_main_loop_end(void)
if (_thread_loop > 0)
return _thread_loop;
if (_ecore_main_domain != EFL_ID_DOMAIN_INVALID)
{
efl_domain_data_return(_ecore_main_domain);
_ecore_main_domain = EFL_ID_DOMAIN_INVALID;
}
current_id = _thread_id;
eina_lock_take(&_thread_mutex);
@ -974,6 +993,7 @@ _ecore_main_call_flush(void)
eina_lock_take(&call->m);
_thread_id = call->current_id;
call->eo_domain_data = efl_domain_data_get();
eina_condition_broadcast(&call->c);
eina_lock_release(&call->m);

View File

@ -525,6 +525,192 @@ EAPI Eina_Bool efl_object_init(void);
*/
EAPI Eina_Bool efl_object_shutdown(void);
/**
* The virtual allocation domain where an object lives
*
* You cannot mix objects between domains in the object tree or as direct
* or indirect references unless you explicitly handle it and ensure the
* other domain is adopted into your local thread space
*/
typedef enum
{
EFL_ID_DOMAIN_INVALID = -1, /**< Invalid */
EFL_ID_DOMAIN_MAIN = 0, /**< The main loop domain where eo_init() is called */
EFL_ID_DOMAIN_SHARED = 1, /**< A special shared domain that all threads can see but has extra locking and unlocking costs to access */
EFL_ID_DOMAIN_THREAD, /**< The normal domain for threads so they can adopt the main loop domain at times */
EFL_ID_DOMAIN_OTHER /**< A 'spare' extra domain that regular threads can talk to or even set themselves to be */
} Efl_Id_Domain;
/**
* @typedef Efl_Domain_Data
* An opaque handle for private domain data
*/
typedef struct _Efl_Domain_Data Efl_Domain_Data;
/**
* @brief Get the native domain for the current thread
*
* @return The native domain
*
* This will return the native eo object allocation domain for the current
* thread. This can only be changed with efl_domain_switch(), and that can
* only be called before any objects are created/allocated on the thread
* in question. Calling efl_domain_switch() after that point will result in
* undefined behavior, so be sure to call it immediately after a thread
* begins to execute, before anything else. You must not change the domain
* of the main thread.
*
* @see efl_domain_switch()
* @see efl_domain_current_get()
* @see efl_domain_current_set()
* @see efl_domain_current_push()
* @see efl_domain_current_pop()
* @see efl_domain_data_get()
* @see efl_domain_data_adopt()
* @see efl_domain_data_return()
* @see efl_compatible()
*/
EAPI Efl_Id_Domain efl_domain_get(void);
/**
* @brief Switch the native domain for the current thread
* @param domain The domain to switch to
* @return EINA_TRUE if the switch succeeds, and EINA_FALSE if it fails
*
* Permanently switch the native domain for allocated objects for the calling
* thread. All objects created on this thread will belong to this domain
* UNLESS the thread has temporarily switched to another domain with
* efl_domain_current_set(), efl_domain_current_push() or
* efl_domain_current_pop(), or via efl_domain_data_adopt() and
* efl_domain_data_return().
*
* @see efl_domain_get()
*/
EAPI Eina_Bool efl_domain_switch(Efl_Id_Domain domain);
/**
* @brief Get the current domain used for allocating new objects
* @return The current domain
*
* Get the currently used domain that is at the top of the domain stack.
* There is actually a stack of domains that you can alter via
* efl_domain_current_push() and efl_domain_current_pop(). This only gets
* the domain for the current thread.
*
* @see efl_domain_get()
*/
EAPI Efl_Id_Domain efl_domain_current_get(void);
/**
* @brief Set the current domain used for allocating new objects
* @param domain The domain to set
* @return EINA_TRUE if it succeeds, and EINA_FALSE on failure
*
* Temporarily switch the current domain being used for allocation. There
* is actually a stack of domains that you can alter via
* efl_domain_current_push() and efl_domain_current_pop(). This only applies
* to the calling thread.
*
* @see efl_domain_get()
*/
EAPI Eina_Bool efl_domain_current_set(Efl_Id_Domain domain);
/**
* @brief Push a new domain onto the domain stack
* @param domain The domain to push
* @return EINA_TRUE if it succeeds, and EINA_FALSE on failure
*
* This pushes a domain on the domain stack that can be popped later with
* efl_domain_current_pop(). If the stack is full this may fail and return
* EINA_FALSE in that case. This applies only to the calling thread.
*
* @see efl_domain_get()
*/
EAPI Eina_Bool efl_domain_current_push(Efl_Id_Domain domain);
/**
* @brief Pop a previously pushed domain from the domain stack
*
* This pops the top domain off the domain stack for the calling thread
* that was pushed with efl_domain_current_push().
*
* @see efl_domain_get()
*/
EAPI void efl_domain_current_pop(void);
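/* A minimal usage sketch of the push/pop API above: create one object in
 * the shared domain without changing the thread's default domain. efl_add()
 * and EFL_OBJECT_CLASS are assumed from the rest of the EFL tree and are not
 * part of this excerpt.
 *
 *   Eo *shared_obj = NULL;
 *   if (efl_domain_current_push(EFL_ID_DOMAIN_SHARED))
 *     {
 *        shared_obj = efl_add(EFL_OBJECT_CLASS, NULL);
 *        efl_domain_current_pop();
 *     }
 */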
/**
* @brief Get an opaque handle to the local native domain eoid data
* @return A handle to the local native domain data or NULL on failure
*
* This gets a handle to the domain data for the current thread, intended
* to be used by another thread to adopt with efl_domain_data_adopt().
* Once you use efl_domain_data_adopt() the thread that called
* efl_domain_data_get() should suspend and not execute anything
* related to eo or efl objects until the thread that adopted the data
* calls efl_domain_data_return() to return the data to its owner and
* stop making it available to the adopting thread.
*
* @see efl_domain_get()
*/
EAPI Efl_Domain_Data *efl_domain_data_get(void);
/**
* @brief Adopt a single extra domain to be the current domain
* @param data_in The domain data to adopt
* @return The domain that was adopted or EFL_ID_DOMAIN_INVALID on failure
*
* This will adopt the given domain data pointed to by @p data_in
* as an extra domain locally. The adopted domain must have a domain ID
* that is not the same as the current thread domain or local domain. You
* may not adopt a domain that clashes with the current domain. If you
* set, push or pop domains so these might clash (be the same) then
* undefined behaviour will occur.
*
* This will also push the adopted domain as the current domain so that
* all newly created objects (unless their parent is of a differing domain)
* will be part of this adopted domain. You can still access objects from
* your local domain as well, but be aware that creation will require
* some switch of domain by push, pop or set. Return the domain with
* efl_domain_data_return() when done.
*
* @see efl_domain_get()
*/
EAPI Efl_Id_Domain efl_domain_data_adopt(Efl_Domain_Data *data_in);
/**
* @brief Return a domain to its original owning thread
* @param domain The domain to return
* @return EINA_TRUE on success, EINA_FALSE on failure
*
* This returns the domain specified by @p domain to the thread it came
* from, allowing that thread to continue execution after this. This
* will implicitly pop the current domain from the stack, assuming that
* the current domain is the same one pushed implicitly by
* efl_domain_data_adopt(). You cannot return your own native local
* domain, only the one that was adopted by efl_domain_data_adopt().
*
* @see efl_domain_get()
*/
EAPI Eina_Bool efl_domain_data_return(Efl_Id_Domain domain);
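/* A sketch of the adopt/return handshake documented above: the owner thread
 * exports its domain data and suspends, the adopting thread uses it and then
 * returns it. How the pointer crosses threads and how the owner waits is
 * left abstract here (any queue/condition mechanism would do).
 *
 *   // owner thread, before suspending:
 *   Efl_Domain_Data *dd = efl_domain_data_get();
 *   // hand dd over to the other thread, then block until it is returned
 *
 *   // adopting thread:
 *   Efl_Id_Domain d = efl_domain_data_adopt(dd);
 *   if (d != EFL_ID_DOMAIN_INVALID)
 *     {
 *        // objects belonging to the owner thread can be used safely here
 *        efl_domain_data_return(d);
 *     }
 *   // signal the owner thread that it may resume
 */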
/**
* @brief Check if two objects are compatible
* @param obj The basic object
* @param obj_target The alternate object that may be referenced by @p obj
* @return EINA_TRUE if compatible, EINA_FALSE if not
*
* This checks to see if two objects are compatible and could be parent and
* child of each other, could reference each other, etc. There is only a
* need to call this if you got objects from multiple domains (an
* adopted domain with efl_domain_data_adopt() or the shared domain
* EFL_ID_DOMAIN_SHARED where objects may be accessed by any thread).
*
* @see efl_domain_get()
*/
EAPI Eina_Bool efl_compatible(const Eo *obj, const Eo *obj_target);
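/* For instance (a sketch with hypothetical handles obj and shared_obj that
 * may come from different domains), a cross-domain parent link could be
 * guarded as:
 *
 *   if (efl_compatible(obj, shared_obj))
 *     efl_parent_set(shared_obj, obj);
 */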
// to fetch internal function and object data at once
typedef struct _Efl_Object_Op_Call_Data
{

View File

@ -733,7 +733,7 @@ _efl_add_internal_start(const char *file, int line, const Efl_Class *klass_id, E
#ifndef HAVE_EO_ID
EINA_MAGIC_SET((Eo_Header *) obj, EO_EINA_MAGIC);
#endif
obj->header.id = _eo_id_allocate(obj);
obj->header.id = _eo_id_allocate(obj, parent_id);
Eo *eo_id = _eo_obj_id_get(obj);
_eo_condtor_reset(obj);
@ -1402,40 +1402,69 @@ efl_object_override(Eo *eo_id, const Efl_Object_Ops *ops)
return EINA_TRUE;
}
const Eo *cached_isa_id = NULL;
static const Efl_Class *cached_klass = NULL;
static Eina_Bool cached_isa = EINA_FALSE;
EAPI Eina_Bool
efl_isa(const Eo *eo_id, const Efl_Class *klass_id)
{
eina_spinlock_take(&_eoid_lock);
if (cached_isa_id == eo_id && cached_klass == klass_id)
Efl_Id_Domain domain;
Eo_Id_Data *data;
Eo_Id_Table_Data *tdata;
Eina_Bool isa = EINA_FALSE;
domain = ((Eo_Id)eo_id >> SHIFT_DOMAIN) & MASK_DOMAIN;
data = _eo_table_data_get();
tdata = _eo_table_data_table_get(data, domain);
if (!tdata) return EINA_FALSE;
if (EINA_LIKELY(domain != EFL_ID_DOMAIN_SHARED))
{
eina_spinlock_release(&_eoid_lock);
return cached_isa;
if ((tdata->cache.isa_id == eo_id) &&
(tdata->cache.klass == klass_id))
{
isa = tdata->cache.isa;
return isa;
}
EO_OBJ_POINTER_RETURN_VAL(eo_id, obj, EINA_FALSE);
EO_CLASS_POINTER_RETURN_VAL(klass_id, klass, EINA_FALSE);
const op_type_funcs *func = _vtable_func_get
(obj->vtable, klass->base_id + klass->ops_count);
// Caching the result as we do a lot of serial efl_isa due to evas_object_image using it.
tdata->cache.isa_id = eo_id;
tdata->cache.klass = klass_id;
// Currently implemented by reusing the LAST op id. Just marking it with
// _eo_class_isa_func.
isa = tdata->cache.isa = (func && (func->func == _eo_class_isa_func));
}
eina_spinlock_release(&_eoid_lock);
else
{
eina_spinlock_take(&(tdata->lock));
EO_OBJ_POINTER_RETURN_VAL(eo_id, obj, EINA_FALSE);
EO_CLASS_POINTER_RETURN_VAL(klass_id, klass, EINA_FALSE);
const op_type_funcs *func = _vtable_func_get(obj->vtable,
klass->base_id + klass->ops_count);
if ((tdata->cache.isa_id == eo_id) &&
(tdata->cache.klass == klass_id))
{
isa = tdata->cache.isa;
goto shared_ok;
}
eina_spinlock_release(&(tdata->lock));
eina_spinlock_take(&_eoid_lock);
EO_OBJ_POINTER_RETURN_VAL(eo_id, obj, EINA_FALSE);
EO_CLASS_POINTER_RETURN_VAL(klass_id, klass, EINA_FALSE);
const op_type_funcs *func = _vtable_func_get
(obj->vtable, klass->base_id + klass->ops_count);
// Caching the result as we do a lot of serial efl_isa due to evas_object_image using it.
cached_isa_id = eo_id;
cached_klass = klass_id;
eina_spinlock_take(&(tdata->lock));
/* Currently implemented by reusing the LAST op id. Just marking it with
* _eo_class_isa_func. */
cached_isa = (func && (func->func == _eo_class_isa_func));
eina_spinlock_release(&_eoid_lock);
return cached_isa;
// Caching the result as we do a lot of serial efl_isa due to evas_object_image using it.
tdata->cache.isa_id = eo_id;
tdata->cache.klass = klass_id;
// Currently implemented by reusing the LAST op id. Just marking it with
// _eo_class_isa_func.
isa = tdata->cache.isa = (func && (func->func == _eo_class_isa_func));
shared_ok:
eina_spinlock_release(&(tdata->lock));
}
return isa;
}
EAPI Eo *
@ -1750,6 +1779,13 @@ efl_data_xunref_internal(const Eo *obj_id, void *data, const Eo *ref_obj_id)
_efl_data_xunref_internal(obj, data, ref_obj);
}
static void
_eo_table_del_cb(void *in)
{
Eo_Id_Data *data = in;
_eo_free_ids_tables(data);
}
EAPI Eina_Bool
efl_object_init(void)
{
@ -1772,11 +1808,6 @@ efl_object_init(void)
return EINA_FALSE;
}
if (!eina_spinlock_new(&_eoid_lock))
{
EINA_LOG_ERR("Could not init lock.");
return EINA_FALSE;
}
if (!eina_spinlock_new(&_efl_class_creation_lock))
{
EINA_LOG_ERR("Could not init lock.");
@ -1807,6 +1838,27 @@ efl_object_init(void)
_ops_storage = eina_hash_string_superfast_new(NULL);
#endif
_eo_table_data_shared = _eo_table_data_new(EFL_ID_DOMAIN_SHARED);
if (!_eo_table_data_shared)
{
EINA_LOG_ERR("Could not allocate shared table data");
return EINA_FALSE;
}
// specifically force eoid data to be created so we can switch it to domain 0
Eo_Id_Data *data = _eo_table_data_new(EFL_ID_DOMAIN_MAIN);
if (!data)
{
EINA_LOG_ERR("Could not allocate main table data");
return EINA_FALSE;
}
if (!eina_tls_cb_new(&_eo_table_data, _eo_table_del_cb))
{
EINA_LOG_ERR("Could not allocate TLS for eo domain data");
return EINA_FALSE;
}
eina_tls_set(_eo_table_data, data);
#ifdef EO_DEBUG
/* Call it just for coverage purposes. Ugly I know, but I like it better than
* casting everywhere else. */
@ -1856,9 +1908,13 @@ efl_object_shutdown(void)
eina_spinlock_free(&_ops_storage_lock);
eina_spinlock_free(&_efl_class_creation_lock);
_eo_free_ids_tables();
eina_spinlock_free(&_eoid_lock);
_eo_free_ids_tables(_eo_table_data_get());
eina_tls_free(_eo_table_data);
if (_eo_table_data_shared)
{
_eo_free_ids_tables(_eo_table_data_shared);
_eo_table_data_shared = NULL;
}
eina_log_domain_unregister(_eo_log_dom);
_eo_log_dom = -1;
@ -1869,6 +1925,149 @@ efl_object_shutdown(void)
return EINA_FALSE;
}
EAPI Efl_Id_Domain
efl_domain_get(void)
{
Eo_Id_Data *data = _eo_table_data_get();
return data->local_domain;
}
EAPI Efl_Id_Domain
efl_domain_current_get(void)
{
Eo_Id_Data *data = _eo_table_data_get();
return data->domain_stack[data->stack_top];
}
EAPI Eina_Bool
efl_domain_switch(Efl_Id_Domain domain)
{
Eo_Id_Data *data = _eo_table_data_get();
if ((domain < EFL_ID_DOMAIN_MAIN) || (domain > EFL_ID_DOMAIN_THREAD) ||
(domain == EFL_ID_DOMAIN_SHARED))
{
ERR("Invalid domain %i being switched to", domain);
return EINA_FALSE;
}
if (data)
{
if (data->local_domain == domain) return EINA_TRUE;
_eo_free_ids_tables(data);
}
data = _eo_table_data_new(domain);
eina_tls_set(_eo_table_data, data);
return EINA_TRUE;
}
static inline Eina_Bool
_efl_domain_push(Eo_Id_Data *data, Efl_Id_Domain domain)
{
if (data->stack_top >= (sizeof(data->domain_stack) - 1))
{
ERR("Failed to push domain %i on stack. Out of stack space at %i",
domain, data->stack_top);
return EINA_FALSE;
}
data->stack_top++;
data->domain_stack[data->stack_top] = domain;
return EINA_TRUE;
}
static inline void
_efl_domain_pop(Eo_Id_Data *data)
{
if (data->stack_top > 0) data->stack_top--;
}
EAPI Eina_Bool
efl_domain_current_push(Efl_Id_Domain domain)
{
Eo_Id_Data *data = _eo_table_data_get();
return _efl_domain_push(data, domain);
}
EAPI void
efl_domain_current_pop(void)
{
Eo_Id_Data *data = _eo_table_data_get();
_efl_domain_pop(data);
}
EAPI Eina_Bool
efl_domain_current_set(Efl_Id_Domain domain)
{
Eo_Id_Data *data = _eo_table_data_get();
if ((domain < EFL_ID_DOMAIN_MAIN) || (domain > EFL_ID_DOMAIN_THREAD))
{
ERR("Invalid domain %i being set", domain);
return EINA_FALSE;
}
data->domain_stack[data->stack_top] = domain;
return EINA_TRUE;
}
EAPI Efl_Domain_Data *
efl_domain_data_get(void)
{
Eo_Id_Data *data = _eo_table_data_get();
return (Efl_Domain_Data *)data;
}
EAPI Efl_Id_Domain
efl_domain_data_adopt(Efl_Domain_Data *data_in)
{
Eo_Id_Data *data = _eo_table_data_get();
Eo_Id_Data *data_foreign = (Eo_Id_Data *)data_in;
if (!data_foreign)
{
ERR("Trying to adopt NULL domain data");
return EFL_ID_DOMAIN_INVALID;
}
if (data_foreign->local_domain == data->local_domain)
{
ERR("Trying to adopt EO ID domain %i, is the same as the local %i",
data_foreign->local_domain, data->local_domain);
return EFL_ID_DOMAIN_INVALID;
}
if (data->tables[data_foreign->local_domain])
{
ERR("Trying to adopt an already adopted domain");
return EFL_ID_DOMAIN_INVALID;
}
data->tables[data_foreign->local_domain] =
data_foreign->tables[data_foreign->local_domain];
_efl_domain_push(data, data_foreign->local_domain);
return data->domain_stack[data->stack_top];
}
EAPI Eina_Bool
efl_domain_data_return(Efl_Id_Domain domain)
{
Eo_Id_Data *data = _eo_table_data_get();
if ((domain < EFL_ID_DOMAIN_MAIN) || (domain > EFL_ID_DOMAIN_THREAD))
{
ERR("Invalid domain %i being returned to owning thread", domain);
return EINA_FALSE;
}
if (domain == data->local_domain)
{
ERR("Cannot return the local domain back to its owner");
return EINA_FALSE;
}
data->tables[domain] = NULL;
_efl_domain_pop(data);
return EINA_TRUE;
}
EAPI Eina_Bool
efl_compatible(const Eo *obj, const Eo *obj_target)
{
return _eo_id_domain_compatible(obj, obj_target);
}
EAPI Eina_Bool
efl_destructed_is(const Eo *obj_id)
{
@ -1911,3 +2110,114 @@ efl_callbacks_cmp(const Efl_Callback_Array_Item *a, const Efl_Callback_Array_Ite
{
return (const unsigned char *) a->desc - (const unsigned char *) b->desc;
}
_Eo_Object *
_eo_obj_pointer_get(const Eo_Id obj_id)
{
#ifdef HAVE_EO_ID
_Eo_Id_Entry *entry;
_Eo_Object *ptr;
Generation_Counter generation;
Table_Index mid_table_id, table_id, entry_id;
Eo_Id tag_bit;
Eo_Id_Data *data;
Eo_Id_Table_Data *tdata;
unsigned char domain;
// NULL objects will just be sensibly ignored. not worth complaining
// every single time.
domain = (obj_id >> SHIFT_DOMAIN) & MASK_DOMAIN;
data = _eo_table_data_get();
tdata = _eo_table_data_table_get(data, domain);
if (!tdata) goto err_invalid;
if (EINA_LIKELY(domain != EFL_ID_DOMAIN_SHARED))
{
if (obj_id == tdata->cache.id)
{
ptr = tdata->cache.object;
return ptr;
}
// get tag bit to check later down below - pipelining
tag_bit = (obj_id) & MASK_OBJ_TAG;
if (!obj_id) goto err_null;
else if (!tag_bit) goto err_invalid;
EO_DECOMPOSE_ID(obj_id, mid_table_id, table_id, entry_id, generation);
// Check the validity of the entry
if (tdata->eo_ids_tables[mid_table_id])
{
_Eo_Ids_Table *tab = TABLE_FROM_IDS;
if (tab)
{
entry = &(tab->entries[entry_id]);
if (entry->active && (entry->generation == generation))
{
// Cache the result of that lookup
tdata->cache.object = entry->ptr;
tdata->cache.id = obj_id;
ptr = entry->ptr;
return ptr;
}
}
}
goto err;
}
else
{
eina_spinlock_take(&(tdata->lock));
if (obj_id == tdata->cache.id)
{
ptr = tdata->cache.object;
goto shared_ok;
}
// get tag bit to check later down below - pipelining
tag_bit = (obj_id) & MASK_OBJ_TAG;
if (!obj_id) goto err_null;
else if (!tag_bit) goto err_invalid;
EO_DECOMPOSE_ID(obj_id, mid_table_id, table_id, entry_id, generation);
// Check the validity of the entry
if (tdata->eo_ids_tables[mid_table_id])
{
_Eo_Ids_Table *tab = TABLE_FROM_IDS;
if (tab)
{
entry = &(tab->entries[entry_id]);
if (entry->active && (entry->generation == generation))
{
// Cache the result of that lookup
tdata->cache.object = entry->ptr;
tdata->cache.id = obj_id;
ptr = entry->ptr;
goto shared_ok;
}
}
}
eina_spinlock_release(&(tdata->lock));
goto err;
shared_ok:
eina_spinlock_release(&(tdata->lock));
return ptr;
}
err_null:
DBG("obj_id is NULL. Possibly unintended access?");
return NULL;
err_invalid:
DBG("obj_id is not a valid object id.");
return NULL;
err:
ERR("obj_id %p is not a valid object. Maybe it has been freed or does not belong to your thread?",
(void *)obj_id);
return NULL;
#else
return (_Eo_Object *) obj_id;
#endif
}

View File

@ -11,10 +11,6 @@
static int event_freeze_count = 0;
Eina_Spinlock _eoid_lock;
_Eo_Object *cached_object = NULL;
Eo_Id cached_id = 0;
typedef struct _Eo_Callback_Description Eo_Callback_Description;
typedef struct
@ -265,6 +261,7 @@ _efl_object_key_ref_set(Eo *obj EINA_UNUSED, Efl_Object_Data *pd, const char *ke
{
Eo_Generic_Data_Node *node;
if (!_eo_id_domain_compatible(obj, objdata)) return;
node = _key_generic_set(obj, pd, key, objdata, DATA_OBJ, EINA_TRUE);
if (node)
{
@ -284,6 +281,7 @@ _efl_object_key_wref_set(Eo *obj, Efl_Object_Data *pd, const char * key, const E
{
Eo_Generic_Data_Node *node;
if (!_eo_id_domain_compatible(obj, objdata)) return;
node = _key_generic_set(obj, pd, key, objdata, DATA_OBJ_WEAK, EINA_TRUE);
if (node)
{
@ -535,6 +533,10 @@ _efl_object_parent_set(Eo *obj, Efl_Object_Data *pd, Eo *parent_id)
{
if (pd->parent == parent_id)
return;
if (parent_id)
{
if (!_eo_id_domain_compatible(parent_id, obj)) return;
}
EO_OBJ_POINTER(obj, eo_obj);

View File

@ -61,18 +61,12 @@ typedef uintptr_t Eo_Id;
typedef struct _Efl_Class _Efl_Class;
typedef struct _Eo_Header Eo_Header;
/* Retrieves the pointer to the object from the id */
static inline _Eo_Object *_eo_obj_pointer_get(const Eo_Id obj_id);
/* Allocates an entry for the given object */
static inline Eo_Id _eo_id_allocate(const _Eo_Object *obj);
static inline Eo_Id _eo_id_allocate(const _Eo_Object *obj, const Eo *parent_id);
/* Releases an entry by the object id */
static inline void _eo_id_release(const Eo_Id obj_id);
/* Free all the entries and the tables */
static inline void _eo_free_ids_tables(void);
void _eo_condtor_done(Eo *obj);
typedef struct _Dich_Chain1 Dich_Chain1;
@ -212,6 +206,9 @@ Eo *_eo_header_id_get(const Eo_Header *header)
#endif
}
/* Retrieves the pointer to the object from the id */
_Eo_Object *_eo_obj_pointer_get(const Eo_Id obj_id);
static inline
Efl_Class *_eo_class_id_get(const _Efl_Class *klass)
{

View File

@ -4,17 +4,12 @@
#include "eo_ptr_indirection.h"
/* Tables handling pointers indirection */
_Eo_Ids_Table **_eo_ids_tables[MAX_MID_TABLE_ID] = { NULL };
//////////////////////////////////////////////////////////////////////////
/* Current table used for following allocations */
_Eo_Ids_Table *_current_table = NULL;
Eina_TLS _eo_table_data;
Eo_Id_Data *_eo_table_data_shared = NULL;
/* Spare empty table */
_Eo_Ids_Table *_empty_table = NULL;
/* Next generation to use when assigning a new entry to a Eo pointer */
Generation_Counter _eo_generation_counter = 0;
//////////////////////////////////////////////////////////////////////////
void
_eo_pointer_error(const char *msg)

View File

@ -103,7 +103,11 @@ void _eo_pointer_error(const char *msg);
void _eo_print();
#endif
extern Eina_TLS _eo_table_data;
#include "eo_ptr_indirection.x"
extern Eo_Id_Data *_eo_table_data_shared;
#endif

View File

@ -61,8 +61,9 @@
/* 32 bits */
# define BITS_MID_TABLE_ID 5
# define BITS_TABLE_ID 5
# define BITS_ENTRY_ID 12
# define BITS_GENERATION_COUNTER 8
# define BITS_ENTRY_ID 11
# define BITS_GENERATION_COUNTER 7
# define BITS_DOMAIN 2
# define REF_TAG_SHIFT 30
# define SUPER_TAG_SHIFT 31
# define DROPPED_TABLES 0
@ -73,8 +74,9 @@ typedef uint16_t Generation_Counter;
/* 64 bits */
# define BITS_MID_TABLE_ID 11
# define BITS_TABLE_ID 11
# define BITS_ENTRY_ID 12
# define BITS_GENERATION_COUNTER 28
# define BITS_ENTRY_ID 11
# define BITS_GENERATION_COUNTER 27
# define BITS_DOMAIN 2
# define REF_TAG_SHIFT 62
# define SUPER_TAG_SHIFT 63
# define DROPPED_TABLES 2
@ -84,6 +86,8 @@ typedef uint32_t Generation_Counter;
#endif
/* Shifts macros to manipulate the Eo id */
#define SHIFT_DOMAIN (BITS_MID_TABLE_ID + BITS_TABLE_ID + \
BITS_ENTRY_ID + BITS_GENERATION_COUNTER)
#define SHIFT_MID_TABLE_ID (BITS_TABLE_ID + \
BITS_ENTRY_ID + BITS_GENERATION_COUNTER)
#define SHIFT_TABLE_ID (BITS_ENTRY_ID + BITS_GENERATION_COUNTER)
@ -91,12 +95,14 @@ typedef uint32_t Generation_Counter;
/* Maximum ranges - a few tables and entries are dropped to minimize the amount
* of wasted bytes, see _eo_id_mem_alloc */
#define MAX_DOMAIN (1 << BITS_DOMAIN)
#define MAX_MID_TABLE_ID (1 << BITS_MID_TABLE_ID)
#define MAX_TABLE_ID ((1 << BITS_TABLE_ID) - DROPPED_TABLES )
#define MAX_ENTRY_ID ((1 << BITS_ENTRY_ID) - DROPPED_ENTRIES)
#define MAX_GENERATIONS (1 << BITS_GENERATION_COUNTER)
/* Masks */
#define MASK_DOMAIN (MAX_DOMAIN - 1)
#define MASK_MID_TABLE_ID (MAX_MID_TABLE_ID - 1)
#define MASK_TABLE_ID ((1 << BITS_TABLE_ID) - 1)
#define MASK_ENTRY_ID ((1 << BITS_ENTRY_ID) - 1)
@ -236,110 +242,155 @@ typedef struct
_Eo_Id_Entry entries[MAX_ENTRY_ID];
} _Eo_Ids_Table;
/* Tables handling pointers indirection */
extern _Eo_Ids_Table **_eo_ids_tables[MAX_MID_TABLE_ID];
//////////////////////////////////////////////////////////////////////////
/* Current table used for following allocations */
extern _Eo_Ids_Table *_current_table;
typedef struct _Eo_Id_Data Eo_Id_Data;
typedef struct _Eo_Id_Table_Data Eo_Id_Table_Data;
/* Spare empty table */
extern _Eo_Ids_Table *_empty_table;
struct _Eo_Id_Table_Data
{
/* Tables handling pointers indirection */
_Eo_Ids_Table **eo_ids_tables[MAX_MID_TABLE_ID];
/* Current table used for following allocations */
_Eo_Ids_Table *current_table;
/* Spare empty table */
_Eo_Ids_Table *empty_table;
/* Cached eoid lookups */
struct
{
Eo_Id id;
_Eo_Object *object;
const Eo *isa_id;
const Efl_Class *klass;
Eina_Bool isa;
}
cache;
/* Next generation to use when assigning a new entry to a Eo pointer */
Generation_Counter generation;
/* Optional lock around objects and eoid table - only used if shared */
Eina_Spinlock lock;
/* are we shared so we need lock/unlock? */
Eina_Bool shared : 1;
};
struct _Eo_Id_Data
{
Eo_Id_Table_Data *tables[4];
unsigned char local_domain;
unsigned char stack_top;
unsigned char domain_stack[255 - (sizeof(void *) * 4) - 2];
};
extern Eina_TLS _eo_table_data;
extern Eo_Id_Data *_eo_table_data_shared;
static inline Eo_Id_Table_Data *
_eo_table_data_table_new(Efl_Id_Domain domain)
{
Eo_Id_Table_Data *tdata;
tdata = calloc(1, sizeof(Eo_Id_Table_Data));
if (!tdata) return NULL;
if (domain == EFL_ID_DOMAIN_SHARED)
{
if (!eina_spinlock_new(&(tdata->lock)))
{
free(tdata);
return NULL;
}
tdata->shared = EINA_TRUE;
}
// XXX: randomize generation count and allocation methods
return tdata;
}
static inline Eo_Id_Data *
_eo_table_data_new(Efl_Id_Domain domain)
{
Eo_Id_Data *data;
data = calloc(1, sizeof(Eo_Id_Data));
if (!data) return NULL;
data->local_domain = domain;
data->domain_stack[data->stack_top] = data->local_domain;
data->tables[data->local_domain] =
_eo_table_data_table_new(data->local_domain);
if (domain != EFL_ID_DOMAIN_SHARED)
data->tables[EFL_ID_DOMAIN_SHARED] =
_eo_table_data_shared->tables[EFL_ID_DOMAIN_SHARED];
return data;
}
static void
_eo_table_data_table_free(Eo_Id_Table_Data *tdata)
{
if (tdata->shared) eina_spinlock_free(&(tdata->lock));
free(tdata);
}
static inline Eo_Id_Data *
_eo_table_data_get(void)
{
Eo_Id_Data *data = eina_tls_get(_eo_table_data);
if (EINA_LIKELY(data != NULL)) return data;
data = _eo_table_data_new(EFL_ID_DOMAIN_THREAD);
if (!data) return NULL;
eina_tls_set(_eo_table_data, data);
return data;
}
static inline Eo_Id_Table_Data *
_eo_table_data_current_table_get(Eo_Id_Data *data)
{
return data->tables[data->domain_stack[data->stack_top]];
}
static inline Eo_Id_Table_Data *
_eo_table_data_table_get(Eo_Id_Data *data, Efl_Id_Domain domain)
{
return data->tables[domain];
}
static inline Eina_Bool
_eo_id_domain_compatible(const Eo *o1, const Eo *o2)
{
Efl_Id_Domain domain1 = ((Eo_Id)o1 >> SHIFT_DOMAIN) & MASK_DOMAIN;
Efl_Id_Domain domain2 = ((Eo_Id)o2 >> SHIFT_DOMAIN) & MASK_DOMAIN;
if (domain1 != domain2)
{
ERR("Object %p and %p are not compatible. Domain %i and %i do not match",
o1, o2, domain1, domain2);
return EINA_FALSE;
}
return EINA_TRUE;
}
//////////////////////////////////////////////////////////////////////////
/* Next generation to use when assigning a new entry to a Eo pointer */
extern Generation_Counter _eo_generation_counter;
/* Macro used to compose an Eo id */
#define EO_COMPOSE_PARTIAL_ID(MID_TABLE, TABLE) \
(((Eo_Id) 0x1 << REF_TAG_SHIFT) | \
((Eo_Id)(MID_TABLE & MASK_MID_TABLE_ID) << SHIFT_MID_TABLE_ID) | \
#define EO_COMPOSE_PARTIAL_ID(MID_TABLE, TABLE) \
(((Eo_Id) 0x1 << REF_TAG_SHIFT) | \
((Eo_Id)(MID_TABLE & MASK_MID_TABLE_ID) << SHIFT_MID_TABLE_ID) | \
((Eo_Id)(TABLE & MASK_TABLE_ID) << SHIFT_TABLE_ID))
#define EO_COMPOSE_FINAL_ID(PARTIAL_ID, ENTRY, GENERATION) \
(PARTIAL_ID | \
((ENTRY & MASK_ENTRY_ID) << SHIFT_ENTRY_ID) | \
#define EO_COMPOSE_FINAL_ID(PARTIAL_ID, ENTRY, DOMAIN, GENERATION) \
(PARTIAL_ID | \
(((Eo_Id)DOMAIN & MASK_DOMAIN) << SHIFT_DOMAIN) | \
((ENTRY & MASK_ENTRY_ID) << SHIFT_ENTRY_ID) | \
(GENERATION & MASK_GENERATIONS ))
/* Macro to extract from an Eo id the indexes of the tables */
#define EO_DECOMPOSE_ID(ID, MID_TABLE, TABLE, ENTRY, GENERATION) \
MID_TABLE = (ID >> SHIFT_MID_TABLE_ID) & MASK_MID_TABLE_ID; \
TABLE = (ID >> SHIFT_TABLE_ID) & MASK_TABLE_ID; \
ENTRY = (ID >> SHIFT_ENTRY_ID) & MASK_ENTRY_ID; \
#define EO_DECOMPOSE_ID(ID, MID_TABLE, TABLE, ENTRY, GENERATION) \
MID_TABLE = (ID >> SHIFT_MID_TABLE_ID) & MASK_MID_TABLE_ID; \
TABLE = (ID >> SHIFT_TABLE_ID) & MASK_TABLE_ID; \
ENTRY = (ID >> SHIFT_ENTRY_ID) & MASK_ENTRY_ID; \
GENERATION = ID & MASK_GENERATIONS;
/* Macro used for readability */
#define TABLE_FROM_IDS _eo_ids_tables[mid_table_id][table_id]
extern _Eo_Object *cached_object;
extern Eo_Id cached_id;
extern Eina_Spinlock _eoid_lock;
static inline _Eo_Object *
_eo_obj_pointer_get(const Eo_Id obj_id)
{
#ifdef HAVE_EO_ID
_Eo_Id_Entry *entry;
_Eo_Object *ptr;
Generation_Counter generation;
Table_Index mid_table_id, table_id, entry_id;
Eo_Id tag_bit;
// NULL objects will just be sensibly ignored. not worth complaining
// every single time.
eina_spinlock_take(&_eoid_lock);
if (obj_id == cached_id)
{
ptr = cached_object;
eina_spinlock_release(&_eoid_lock);
return ptr;
}
// get tag bit to check later down below - pipelining
tag_bit = (obj_id) & MASK_OBJ_TAG;
if (!obj_id)
{
eina_spinlock_release(&_eoid_lock);
DBG("obj_id is NULL. Possibly unintended access?");
return NULL;
}
else if (!tag_bit)
{
eina_spinlock_release(&_eoid_lock);
DBG("obj_id is not a valid object id.");
return NULL;
}
EO_DECOMPOSE_ID(obj_id, mid_table_id, table_id, entry_id, generation);
/* Check the validity of the entry */
if (_eo_ids_tables[mid_table_id])
{
_Eo_Ids_Table *tab = TABLE_FROM_IDS;
if (tab)
{
entry = &(tab->entries[entry_id]);
if (entry->active && (entry->generation == generation))
{
// Cache the result of that lookup
cached_object = entry->ptr;
cached_id = obj_id;
ptr = cached_object;
eina_spinlock_release(&_eoid_lock);
return ptr;
}
}
}
ERR("obj_id %p is not pointing to a valid object. Maybe it has already been freed.",
(void *)obj_id);
eina_spinlock_release(&_eoid_lock);
return NULL;
#else
return (_Eo_Object *) obj_id;
#endif
}
#define TABLE_FROM_IDS tdata->eo_ids_tables[mid_table_id][table_id]
static inline _Eo_Id_Entry *
_get_available_entry(_Eo_Ids_Table *table)
@ -370,17 +421,18 @@ _get_available_entry(_Eo_Ids_Table *table)
}
static inline _Eo_Id_Entry *
_search_tables(void)
_search_tables(Eo_Id_Table_Data *tdata)
{
_Eo_Ids_Table *table;
_Eo_Id_Entry *entry;
if (!tdata) return NULL;
for (Table_Index mid_table_id = 0; mid_table_id < MAX_MID_TABLE_ID; mid_table_id++)
{
if (!_eo_ids_tables[mid_table_id])
if (!tdata->eo_ids_tables[mid_table_id])
{
/* Allocate a new intermediate table */
_eo_ids_tables[mid_table_id] = _eo_id_mem_calloc(MAX_TABLE_ID, sizeof(_Eo_Ids_Table*));
tdata->eo_ids_tables[mid_table_id] = _eo_id_mem_calloc(MAX_TABLE_ID, sizeof(_Eo_Ids_Table*));
}
for (Table_Index table_id = 0; table_id < MAX_TABLE_ID; table_id++)
@ -389,11 +441,11 @@ _search_tables(void)
if (!table)
{
if (_empty_table)
if (tdata->empty_table)
{
/* Recycle the available empty table */
table = _empty_table;
_empty_table = NULL;
table = tdata->empty_table;
tdata->empty_table = NULL;
UNPROTECT(table);
}
else
@ -407,9 +459,9 @@ _search_tables(void)
table->fifo_head = table->fifo_tail = -1;
table->partial_id = EO_COMPOSE_PARTIAL_ID(mid_table_id, table_id);
entry = &(table->entries[0]);
UNPROTECT(_eo_ids_tables[mid_table_id]);
UNPROTECT(tdata->eo_ids_tables[mid_table_id]);
TABLE_FROM_IDS = table;
PROTECT(_eo_ids_tables[mid_table_id]);
PROTECT(tdata->eo_ids_tables[mid_table_id]);
}
else
entry = _get_available_entry(table);
@ -417,60 +469,99 @@ _search_tables(void)
if (entry)
{
/* Store table info into current table */
_current_table = table;
tdata->current_table = table;
return entry;
}
}
}
ERR("no more available entries to store eo objects");
_current_table = NULL;
tdata->current_table = NULL;
return NULL;
}
/* Gives a fake id that serves as a marker if eo id is off. */
static inline Eo_Id
_eo_id_allocate(const _Eo_Object *obj)
_eo_id_allocate(const _Eo_Object *obj, const Eo *parent_id)
{
#ifdef HAVE_EO_ID
_Eo_Id_Entry *entry = NULL;
Eo_Id_Data *data;
Eo_Id_Table_Data *tdata;
Eo_Id id;
eina_spinlock_take(&_eoid_lock);
if (_current_table)
entry = _get_available_entry(_current_table);
if (!entry)
data = _eo_table_data_get();
if (parent_id)
{
entry = _search_tables();
Efl_Id_Domain domain = ((Eo_Id)parent_id >> SHIFT_DOMAIN) & MASK_DOMAIN;
tdata = _eo_table_data_table_get(data, domain);
}
else tdata = _eo_table_data_current_table_get(data);
if (!tdata) return 0;
if (!_current_table || !entry)
if (EINA_LIKELY(!tdata->shared))
{
eina_spinlock_release(&_eoid_lock);
return 0;
}
if (tdata->current_table)
entry = _get_available_entry(tdata->current_table);
/* [1;max-1] thus we never generate an Eo_Id equal to 0 */
_eo_generation_counter++;
if (_eo_generation_counter == MAX_GENERATIONS)
_eo_generation_counter = 1;
/* Fill the entry and return it's Eo Id */
entry->ptr = (_Eo_Object *)obj;
entry->active = 1;
entry->generation = _eo_generation_counter;
PROTECT(_current_table);
eina_spinlock_release(&_eoid_lock);
return EO_COMPOSE_FINAL_ID(_current_table->partial_id,
(entry - _current_table->entries),
entry->generation);
if (!entry) entry = _search_tables(tdata);
if (!tdata->current_table || !entry)
{
return 0;
}
UNPROTECT(tdata->current_table);
/* [1;max-1] thus we never generate an Eo_Id equal to 0 */
tdata->generation++;
if (tdata->generation == MAX_GENERATIONS) tdata->generation = 1;
/* Fill the entry and return it's Eo Id */
entry->ptr = (_Eo_Object *)obj;
entry->active = 1;
entry->generation = tdata->generation;
PROTECT(tdata->current_table);
id = EO_COMPOSE_FINAL_ID(tdata->current_table->partial_id,
(entry - tdata->current_table->entries),
data->domain_stack[data->stack_top],
entry->generation);
}
else
{
eina_spinlock_take(&(tdata->lock));
if (tdata->current_table)
entry = _get_available_entry(tdata->current_table);
if (!entry) entry = _search_tables(tdata);
if (!tdata->current_table || !entry)
{
id = 0;
goto shared_err;
}
UNPROTECT(tdata->current_table);
/* [1;max-1] thus we never generate an Eo_Id equal to 0 */
tdata->generation++;
if (tdata->generation == MAX_GENERATIONS) tdata->generation = 1;
/* Fill the entry and return it's Eo Id */
entry->ptr = (_Eo_Object *)obj;
entry->active = 1;
entry->generation = tdata->generation;
PROTECT(tdata->current_table);
id = EO_COMPOSE_FINAL_ID(tdata->current_table->partial_id,
(entry - tdata->current_table->entries),
data->domain_stack[data->stack_top],
entry->generation);
shared_err:
eina_spinlock_release(&(tdata->lock));
}
return id;
#else
(void) obj;
return MASK_OBJ_TAG;
#endif
}
extern const Eo *cached_isa_id;
static inline void
_eo_id_release(const Eo_Id obj_id)
{
@ -479,59 +570,118 @@ _eo_id_release(const Eo_Id obj_id)
_Eo_Id_Entry *entry;
Generation_Counter generation;
Table_Index mid_table_id, table_id, entry_id;
Efl_Id_Domain domain;
Eo_Id_Data *data;
Eo_Id_Table_Data *tdata;
domain = (obj_id >> SHIFT_DOMAIN) & MASK_DOMAIN;
data = _eo_table_data_get();
tdata = _eo_table_data_table_get(data, domain);
if (!tdata) return;
EO_DECOMPOSE_ID(obj_id, mid_table_id, table_id, entry_id, generation);
eina_spinlock_take(&_eoid_lock);
/* Check the validity of the entry */
if (_eo_ids_tables[mid_table_id] && (table = TABLE_FROM_IDS))
if (EINA_LIKELY(domain != EFL_ID_DOMAIN_SHARED))
{
entry = &(table->entries[entry_id]);
if (entry && entry->active && (entry->generation == generation))
// Check the validity of the entry
if (tdata->eo_ids_tables[mid_table_id] && (table = TABLE_FROM_IDS))
{
UNPROTECT(table);
table->free_entries++;
/* Disable the entry */
entry->active = 0;
entry->next_in_fifo = -1;
/* Push the entry into the fifo */
if (table->fifo_tail == -1)
entry = &(table->entries[entry_id]);
if (entry && entry->active && (entry->generation == generation))
{
table->fifo_head = table->fifo_tail = entry_id;
}
else
{
table->entries[table->fifo_tail].next_in_fifo = entry_id;
table->fifo_tail = entry_id;
}
PROTECT(table);
if (table->free_entries == MAX_ENTRY_ID)
{
UNPROTECT(_eo_ids_tables[mid_table_id]);
TABLE_FROM_IDS = NULL;
PROTECT(_eo_ids_tables[mid_table_id]);
/* Recycle or free the empty table */
if (!_empty_table)
_empty_table = table;
UNPROTECT(table);
table->free_entries++;
// Disable the entry
entry->active = 0;
entry->next_in_fifo = -1;
// Push the entry into the fifo
if (table->fifo_tail == -1)
table->fifo_head = table->fifo_tail = entry_id;
else
_eo_id_mem_free(table);
if (_current_table == table)
_current_table = NULL;
{
table->entries[table->fifo_tail].next_in_fifo = entry_id;
table->fifo_tail = entry_id;
}
PROTECT(table);
if (table->free_entries == MAX_ENTRY_ID)
{
UNPROTECT(tdata->eo_ids_tables[mid_table_id]);
TABLE_FROM_IDS = NULL;
PROTECT(tdata->eo_ids_tables[mid_table_id]);
// Recycle or free the empty table
if (!tdata->empty_table) tdata->empty_table = table;
else _eo_id_mem_free(table);
if (tdata->current_table == table)
tdata->current_table = NULL;
}
// In case an object is destroyed, wipe out the cache
if (tdata->cache.id == obj_id)
{
tdata->cache.id = 0;
tdata->cache.object = NULL;
}
if ((Eo_Id)tdata->cache.isa_id == obj_id)
{
tdata->cache.isa_id = NULL;
tdata->cache.klass = NULL;;
tdata->cache.isa = EINA_FALSE;
}
return;
}
// In case an object is destroyed, wipe out the cache
if (cached_id == obj_id)
{
cached_id = 0;
cached_object = NULL;
}
cached_isa_id = NULL;
eina_spinlock_release(&_eoid_lock);
return;
}
}
eina_spinlock_release(&_eoid_lock);
else
{
eina_spinlock_take(&(tdata->lock));
// Check the validity of the entry
if (tdata->eo_ids_tables[mid_table_id] && (table = TABLE_FROM_IDS))
{
entry = &(table->entries[entry_id]);
if (entry && entry->active && (entry->generation == generation))
{
UNPROTECT(table);
table->free_entries++;
// Disable the entry
entry->active = 0;
entry->next_in_fifo = -1;
// Push the entry into the fifo
if (table->fifo_tail == -1)
table->fifo_head = table->fifo_tail = entry_id;
else
{
table->entries[table->fifo_tail].next_in_fifo = entry_id;
table->fifo_tail = entry_id;
}
PROTECT(table);
if (table->free_entries == MAX_ENTRY_ID)
{
UNPROTECT(tdata->eo_ids_tables[mid_table_id]);
TABLE_FROM_IDS = NULL;
PROTECT(tdata->eo_ids_tables[mid_table_id]);
// Recycle or free the empty table
if (!tdata->empty_table) tdata->empty_table = table;
else _eo_id_mem_free(table);
if (tdata->current_table == table)
tdata->current_table = NULL;
}
// In case an object is destroyed, wipe out the cache
if (tdata->cache.id == obj_id)
{
tdata->cache.id = 0;
tdata->cache.object = NULL;
}
if ((Eo_Id)tdata->cache.isa_id == obj_id)
{
tdata->cache.isa_id = NULL;
tdata->cache.klass = NULL;;
tdata->cache.isa = EINA_FALSE;
}
eina_spinlock_release(&(tdata->lock));
return;
}
}
eina_spinlock_release(&(tdata->lock));
}
ERR("obj_id %p is not pointing to a valid object. Maybe it has already been freed.", (void *)obj_id);
#else
EINA_MAGIC_SET((Eo_Header *) obj_id, EO_FREED_EINA_MAGIC);
@ -539,11 +689,15 @@ _eo_id_release(const Eo_Id obj_id)
}
static inline void
_eo_free_ids_tables(void)
_eo_free_ids_tables(Eo_Id_Data *data)
{
Eo_Id_Table_Data *tdata;
if (!data) return;
tdata = data->tables[data->local_domain];
for (Table_Index mid_table_id = 0; mid_table_id < MAX_MID_TABLE_ID; mid_table_id++)
{
if (_eo_ids_tables[mid_table_id])
if (tdata->eo_ids_tables[mid_table_id])
{
for (Table_Index table_id = 0; table_id < MAX_TABLE_ID; table_id++)
{
@ -552,23 +706,27 @@ _eo_free_ids_tables(void)
_eo_id_mem_free(TABLE_FROM_IDS);
}
}
_eo_id_mem_free(_eo_ids_tables[mid_table_id]);
_eo_id_mem_free(tdata->eo_ids_tables[mid_table_id]);
}
_eo_ids_tables[mid_table_id] = NULL;
tdata->eo_ids_tables[mid_table_id] = NULL;
}
if (_empty_table) _eo_id_mem_free(_empty_table);
_empty_table = _current_table = NULL;
if (tdata->empty_table) _eo_id_mem_free(tdata->empty_table);
tdata->empty_table = tdata->current_table = NULL;
_eo_table_data_table_free(tdata);
data->tables[data->local_domain] = NULL;
free(data);
}
#ifdef EFL_DEBUG
static inline void
_eo_print(void)
_eo_print(Eo_Id_Table_Data *tdata)
{
_Eo_Id_Entry *entry;
unsigned long obj_number = 0;
for (Table_Index mid_table_id = 0; mid_table_id < MAX_MID_TABLE_ID; mid_table_id++)
{
if (_eo_ids_tables[mid_table_id])
if (tdata->eo_ids_tables[mid_table_id])
{
for (Table_Index table_id = 0; table_id < MAX_TABLE_ID; table_id++)
{