#include "private.h" #include "grid_save.h" #include "grid.h" #include "lz4/lz4.h" #include #if defined (__MacOSX__) || (defined (__MACH__) && defined (__APPLE__)) # ifndef MAP_ANONYMOUS # define MAP_ANONYMOUS MAP_ANON # endif #elif _WIN32 # define MAP_ANONYMOUS 0 #endif #define MEM_ALLOC_ALIGN 16 #define MEM_BLOCKS 1024 #define GS_MMAP_SIZE 131072 #define GS_ALLOC_MASK (TS_MMAP_SIZE - 1) typedef struct _Alloc Alloc; struct _Alloc { unsigned int size, last, count, allocated; short slot; unsigned char gen; unsigned char __pad; }; /* local variables */ static int freeze = 0; static int comp = 0; static int uncomp = 0; static int freeops = 0; static int compfreeze = 0; static unsigned char cur_gen = 0; static Ecore_Idler *_save_idler = NULL; static Ecore_Timer *_save_timer = NULL; static Eina_List *_grids = NULL; static uint64_t _allocated = 0; static Alloc *alloc[MEM_BLOCKS] = { 0 }; static int _alloc_roundup_block_size(int sz) { return MEM_ALLOC_ALIGN * ((sz + MEM_ALLOC_ALIGN - 1) / MEM_ALLOC_ALIGN); } static Alloc * _alloc_find(void *mem) { unsigned char *memptr; int i; memptr = mem; for (i = 0; i < MEM_BLOCKS; i++) { unsigned char *al; al = (unsigned char *)alloc[i]; if (!al) continue; if (memptr < al) continue; if ((al + GS_MMAP_SIZE) <= memptr) continue; return alloc[i]; } return NULL; } static void * _alloc_new(int size, unsigned char gen) { Alloc *al; unsigned char *ptr; unsigned int newsize, sz; int i, firstnull = -1; // allocations sized up to nearest size alloc alignment newsize = _alloc_roundup_block_size(size); for (i = 0; i < MEM_BLOCKS; i++) { if (!alloc[i]) { if (firstnull < 0) firstnull = i; continue; } // if generation count matches if (alloc[i]->gen == gen) { // if there is space in the block if ((alloc[i]->size - alloc[i]->last) >= newsize) { ptr = (unsigned char *)alloc[i]; ptr += alloc[i]->last; alloc[i]->last += newsize; alloc[i]->count++; alloc[i]->allocated += newsize; _allocated += newsize; return ptr; } } } // out of slots for new blocks - no null blocks if (firstnull < 0) { ERR("Cannot find new null blocks"); return NULL; } // so allocate a new block sz = GS_MMAP_SIZE; // get mmaped anonymous memory so when freed it goes away from the system ptr = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (ptr == MAP_FAILED) { ERR("Cannot allocate more memory with mmap MAP_ANONYMOUS"); return NULL; } // note - we SHOULD memset to 0, but we are assuming mmap anon give 0 pages //memset(ptr, 0, newsize); al = (Alloc *)ptr; al->size = sz; al->last = sizeof(Alloc) + newsize; al->count = 1; al->allocated = newsize; al->slot = firstnull; al->gen = gen; _allocated += newsize; alloc[al->slot] = al; ptr = (unsigned char *)al; ptr += sizeof(Alloc); return ptr; } static void * _gs_new(int size) { void *ptr; if (!size) return NULL; ptr = _alloc_new(size, cur_gen); return ptr; } static void _gs_free(void *ptr) { Alloc *al; unsigned int sz; Grid_Save_Comp *gs; if (!ptr) return; gs = ptr; if (gs->comp) sz = sizeof(Grid_Save_Comp) + gs->w; else sz = sizeof(Grid_Save) + ((gs->w - 1) * sizeof(Grid_Cell)); sz = _alloc_roundup_block_size(sz); _allocated -= sz; al = _alloc_find(ptr); if (!al) { ERR("Cannot find %p in alloc blocks", ptr); return; } al->count--; al->allocated -= sz; if (al->count > 0) return; alloc[al->slot] = NULL; munmap(al, al->size); } static void _mem_gen_next(void) { cur_gen++; } static unsigned char _mem_gen_get(void) { return cur_gen; } static void _mem_defrag(void) { int i, j = 0; Alloc *alloc2[MEM_BLOCKS]; for (i = 0; i < MEM_BLOCKS; i++) { 
static Grid_Save *
_grid_save_comp(Grid_Save *gs)
{
   Grid_Save *gs2;
   Grid_Save_Comp *gsc;

   if (gs->comp) return gs;
   compfreeze++;
   if (!gs->z)
     {
        int bytes;
        char *buf;

        buf = alloca(LZ4_compressBound(gs->w * sizeof(Grid_Cell)));
        bytes = LZ4_compress((char *)(&(gs->cell[0])), buf,
                             gs->w * sizeof(Grid_Cell));
        gsc = _gs_new(sizeof(Grid_Save_Comp) + bytes);
        if (!gsc)
          {
             ERR("Big problem. Can't allocate backscroll compress buffer");
             gs2 = gs;
             goto done;
          }
        gsc->comp = 1;
        gsc->z = 1;
        gsc->gen = _mem_gen_get();
        gsc->w = bytes;
        gsc->wout = gs->w;
        memcpy(((char *)gsc) + sizeof(Grid_Save_Comp), buf, bytes);
        gs2 = (Grid_Save *)gsc;
     }
   else
     {
        gsc = (Grid_Save_Comp *)gs;
        gs2 = _gs_new(sizeof(Grid_Save_Comp) + gsc->w);
        if (!gs2)
          {
             ERR("Big problem. Can't allocate backscroll compress/copy buffer");
             gs2 = gs;
             goto done;
          }
        memcpy(gs2, gs, sizeof(Grid_Save_Comp) + gsc->w);
        gs2->gen = _mem_gen_get();
        gs2->comp = 1;
     }
   _grid_save_free(gs);
done:
   compfreeze--;
   return gs2;
}

// walk one grid's backscroll and compress every saved line into the current
// generation, keeping the comp/uncomp counters up to date
static void
_grid_walk(Evas_Object *obj)
{
   Grid *sd;
   int i = 0;

   if (!(sd = evas_object_smart_data_get(obj))) return;
   if (!sd->back) return;
   for (; i < sd->backmax; i++)
     {
        Grid_Save_Comp *gsc;

        gsc = (Grid_Save_Comp *)sd->back[i];
        if (gsc)
          {
             sd->back[i] = _grid_save_comp(sd->back[i]);
             if (!sd->back[i]) continue;
             gsc = (Grid_Save_Comp *)sd->back[i];
             if (gsc->comp) comp++;
             else uncomp++;
          }
     }
}

static Eina_Bool
_cb_save_idler(void *data EINA_UNUSED)
{
   Eina_List *l;
   Evas_Object *obj;

   // move to a fresh generation so newly compressed copies land in new
   // blocks, re-compress every registered grid, then compact the block table
   _mem_gen_next();
   comp = 0;
   uncomp = 0;
   EINA_LIST_FOREACH(_grids, l, obj) _grid_walk(obj);
   _mem_defrag();
   freeops = 0;
   _mem_gen_next();
   _save_idler = NULL;
   return EINA_FALSE;
}

static Eina_Bool
_cb_save_timer(void *data EINA_UNUSED)
{
   if (!_save_idler)
     _save_idler = ecore_idler_add(_cb_save_idler, NULL);
   _save_timer = NULL;
   return EINA_FALSE;
}

// (re)arm the delayed compression pass once enough uncompressed lines or
// free operations have piled up
static inline void
_grid_save_compressor_check(Eina_Bool frozen)
{
   if (freeze) return;
   if (_save_idler) return;
   if ((uncomp > 256) || (freeops > 256))
     {
        if ((_save_timer) && (!frozen))
          ecore_timer_reset(_save_timer);
        else if (!_save_timer)
          _save_timer = ecore_timer_add(0.2, _cb_save_timer, NULL);
     }
}

void
_grid_save_register(Evas_Object *obj)
{
   _grid_save_freeze();
   _grids = eina_list_append(_grids, obj);
   _grid_save_thaw();
}

void
_grid_save_unregister(Evas_Object *obj)
{
   _grid_save_freeze();
   _grids = eina_list_remove(_grids, obj);
   _grid_save_thaw();
}

void
_grid_save_freeze(void)
{
   if (!freeze++)
     {
        if (_save_timer) ecore_timer_freeze(_save_timer);
     }
   if (_save_idler) ecore_idler_del(_save_idler);
   _save_idler = NULL;
}

void
_grid_save_thaw(void)
{
   freeze--;
   if (freeze <= 0)
     {
        if (_save_timer) ecore_timer_thaw(_save_timer);
        _grid_save_compressor_check(EINA_TRUE);
     }
}

Grid_Save *
_grid_save_new(int w)
{
   Grid_Save *gs;

   gs = _gs_new(sizeof(Grid_Save) + ((w - 1) * sizeof(Grid_Cell)));
   if (!gs) return NULL;
   gs->gen = _mem_gen_get();
   gs->w = w;
   if (!compfreeze) uncomp++;
   _grid_save_compressor_check(EINA_FALSE);
   return gs;
}

void
_grid_save_free(Grid_Save *gs)
{
   if (!gs) return;
   if (!compfreeze)
     {
        if (gs->comp) comp--;
        else uncomp--;
        freeops++;
     }
   _gs_free(gs);
   _grid_save_compressor_check(EINA_FALSE);
}

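// Decompress-on-access: a caller that needs to read a saved line back gets
// an uncompressed Grid_Save again. The compressed entry is freed and the
// uncomp/freeops counters are bumped so the idler will re-compress it later
// if it is left alone.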
Grid_Save *
_grid_save_extract(Grid_Save *gs)
{
   if (!gs) return NULL;
   if (gs->z)
     {
        Grid_Save_Comp *gsc;
        Grid_Save *gs2;
        char *buf;
        int bytes;

        gsc = (Grid_Save_Comp *)gs;
        gs2 = _gs_new(sizeof(Grid_Save) +
                      ((gsc->wout - 1) * sizeof(Grid_Cell)));
        if (!gs2) return NULL;
        gs2->gen = _mem_gen_get();
        gs2->w = gsc->wout;
        buf = ((char *)gsc) + sizeof(Grid_Save_Comp);
        bytes = LZ4_uncompress(buf, (char *)(&(gs2->cell[0])),
                               gsc->wout * sizeof(Grid_Cell));
        if (bytes < 0)
          {
             memset(&(gs2->cell[0]), 0, gsc->wout * sizeof(Grid_Cell));
          }
        if (gs->comp) comp--;
        else uncomp--;
        uncomp++;
        freeops++;
        compfreeze++;
        _gs_free(gs);
        compfreeze--;
        _grid_save_compressor_check(EINA_FALSE);
        return gs2;
     }
   _grid_save_compressor_check(EINA_FALSE);
   return gs;
}
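
// Illustrative call sequence for a grid smart object keeping its backscroll
// in sd->back[] (a sketch only - the caller-side field names and the copy
// step are assumptions, not part of this file):
//
//   _grid_save_register(obj);                      // once, when the grid is created
//   sd->back[i] = _grid_save_new(w);               // save a line of w cells
//   memcpy(sd->back[i]->cell, cells, w * sizeof(Grid_Cell));
//   ...                                            // idler compresses lines in the background
//   sd->back[i] = _grid_save_extract(sd->back[i]); // read the line back, uncompressed
//   _grid_save_free(sd->back[i]);                  // drop the line when scrolled out
//   _grid_save_unregister(obj);                    // when the grid is deleted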