aboutsummaryrefslogblamecommitdiffstats
path: root/src/lib/efl/interfaces/efl_io_queue.c
blob: a4dcf3216bae10c2ec06b198c311e7d74be38e8b (plain) (tree)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430













































































































































































































































































































































































































































                                                                                                         
#define EFL_IO_READER_PROTECTED 1
#define EFL_IO_WRITER_PROTECTED 1

#include "config.h"
#include "Efl.h"

#define MY_CLASS EFL_IO_QUEUE_CLASS

/*
 * This queue is simple and based on a single buffer that is
 * reallocated as needed up to some limit, keeping some pre-allocated
 * amount of bytes.
 *
 * Writes append to the buffer. Reads consume and remove data from
 * the buffer head.
 *
 * To avoid too much memmove(), reads won't immediately remove data,
 * instead will only increment position_read and allow some
 * slack. When the slack limit is reached or the buffer needs more
 * memory for write, then the memmove() happens.
 *
 * A more complex and possibly more efficient version of this would be
 * to keep a list of internal fixed-size buffers. Writing would segment
 * the data and write it into these chunks, creating new ones as
 * needed. Reading would consume from multiple chunks, and chunks that
 * are fully consumed would be freed.
 */

/* Private data for the queue: one contiguous buffer plus read/write
 * cursors. Invariant: position_read <= position_write <= allocated. */
typedef struct _Efl_Io_Queue_Data
{
   uint8_t *bytes;        /* single reallocated data buffer (may be NULL) */
   size_t allocated;      /* current capacity of 'bytes', in bytes */
   size_t preallocated;   /* minimum capacity to keep when shrinking */
   size_t limit;          /* maximum capacity; 0 means unlimited */
   size_t position_read; /* to avoid memmove(), allows some slack */
   size_t position_write; /* offset where the next write is appended */
   Eina_Bool pending_eos; /* EOS requested but data still unread */
   Eina_Bool eos;         /* EOS reached (reader side) */
   Eina_Bool closed;      /* set by close(); most operations then refuse */
   Eina_Bool can_read;    /* cached reader state, drives CAN_READ events */
   Eina_Bool can_write;   /* cached writer state, drives CAN_WRITE events */
} Efl_Io_Queue_Data;

/* Resize the internal buffer to 'size' bytes, clamped to the queue's
 * limit (when non-zero). A size of zero releases the buffer entirely.
 * Returns EINA_TRUE when the allocation actually changed. */
static Eina_Bool
_efl_io_queue_realloc(Eo *o, Efl_Io_Queue_Data *pd, size_t size)
{
   const size_t limit = efl_io_queue_limit_get(o);
   void *mem;

   if ((limit > 0) && (size > limit)) size = limit;

   if (size == pd->allocated) return EINA_FALSE;

   if (!size)
     {
        free(pd->bytes);
        mem = NULL;
     }
   else
     {
        mem = realloc(pd->bytes, size);
        EINA_SAFETY_ON_NULL_RETURN_VAL(mem, EINA_FALSE);
     }

   pd->bytes = mem;
   pd->allocated = size;
   return EINA_TRUE;
}

/* Pick the slack (spare bytes kept around) based on how much data is
 * currently pending: bigger queues get bigger slack to reduce the
 * number of memmove()/realloc() calls. */
static size_t
_efl_io_queue_slack_get(const Efl_Io_Queue_Data *pd)
{
   const size_t pending = pd->position_write - pd->position_read;

   if (pending >= 4096) return 4096;
   if (pending >= 1024) return 1024;
   if (pending >= 128) return 128;
   return 32;
}

/* Resize like _efl_io_queue_realloc(), but round 'size' up to the next
 * multiple of a bucket step (32/128/1024/4096) chosen by magnitude so
 * that repeated growth does not realloc on every byte.
 * NOTE(review): size == 0 falls into the 128-byte bucket and yields a
 * 128-byte allocation rather than freeing — presumably intentional
 * (callers never pass 0), but worth confirming. */
static Eina_Bool
_efl_io_queue_realloc_rounded(Eo *o, Efl_Io_Queue_Data *pd, size_t size)
{
   size_t step;

   if ((size > 0) && (size < 128)) step = 32;
   else if (size < 1024) step = 128;
   else if (size < 8192) step = 1024;
   else step = 4096;

   size = ((size / step) + 1) * step;
   return _efl_io_queue_realloc(o, pd, size);
}

/* Compact the buffer: move the unread region to the start so that
 * position_read becomes 0 and all trailing space is writable. */
static void
_efl_io_queue_adjust(Efl_Io_Queue_Data *pd)
{
   const size_t used = pd->position_write - pd->position_read;

   /* skip the no-op move; this also avoids memmove(NULL, NULL, 0),
    * which is undefined behavior when the buffer was never allocated */
   if (pd->position_read > 0)
     memmove(pd->bytes, pd->bytes + pd->position_read, used);
   pd->position_write = used;
   pd->position_read = 0;
}

/* Housekeeping after reads/writes: compact the buffer when the read
 * slack grew too large (or when near the limit), then shrink the
 * allocation if it carries much more spare room than the slack policy
 * allows, while never going below 'preallocated'. Growth for writes is
 * handled separately in the write path. */
static void
_efl_io_queue_adjust_and_realloc_if_needed(Eo *o, Efl_Io_Queue_Data *pd)
{
   const size_t slack = _efl_io_queue_slack_get(pd);
   size_t spare;

   if (pd->limit > 0)
     {
        /* limited queue: compact early so writes can use the head room */
        if (pd->position_write + slack >= pd->limit)
          _efl_io_queue_adjust(pd);
     }
   else if (pd->position_read > slack)
     _efl_io_queue_adjust(pd);

   /* shrink when the unused tail exceeds the allowed slack */
   spare = pd->allocated - pd->position_write;
   if (spare > slack)
     {
        size_t new_size = pd->position_write + slack;

        /*
         * this may result in going over slack again, no
         * problems with that.
         */
        if (new_size < pd->preallocated)
          new_size = pd->preallocated;

        /* use rounded so we avoid too many reallocs */
        _efl_io_queue_realloc_rounded(o, pd, new_size);
     }
}

/* Refresh the can_read/can_write flags (and emit their change events)
 * from the current buffer state. Writing is refused once an EOS is
 * pending, and otherwise allowed while usage is under the limit. */
static void
_efl_io_queue_update_cans(Eo *o, Efl_Io_Queue_Data *pd)
{
   const size_t pending = pd->position_write - pd->position_read;
   size_t limit;

   efl_io_reader_can_read_set(o, pending > 0);

   limit = efl_io_queue_limit_get(o);
   if (pd->pending_eos)
     efl_io_writer_can_write_set(o, EINA_FALSE);
   else
     efl_io_writer_can_write_set(o, (limit == 0) || (pending < limit));
}

/* Reserve at least 'size' bytes up-front and remember that floor so
 * later shrinking never goes below it. Refused on closed queues. */
EOLIAN static void
_efl_io_queue_preallocate(Eo *o, Efl_Io_Queue_Data *pd, size_t size)
{
   EINA_SAFETY_ON_TRUE_RETURN(efl_io_closer_closed_get(o));

   if (size > pd->allocated)
     _efl_io_queue_realloc_rounded(o, pd, size);
   pd->preallocated = size;
}

/* Set the maximum queue capacity (0 = unlimited). When shrinking below
 * the current usage, excess buffered data is discarded and the
 * SLICE_CHANGED event is emitted. Refused on closed queues. */
EOLIAN static void
_efl_io_queue_limit_set(Eo *o, Efl_Io_Queue_Data *pd, size_t limit)
{
   EINA_SAFETY_ON_TRUE_RETURN(efl_io_closer_closed_get(o));

   if (pd->limit == limit) return;
   pd->limit = limit;
   if (pd->limit == 0) goto end;

   _efl_io_queue_adjust(pd);

   if (pd->allocated > limit)
     _efl_io_queue_realloc(o, pd, limit);

   /* after _efl_io_queue_adjust(), position_read is 0 and cannot exceed
    * the (non-zero) limit, so only position_write may need clamping;
    * the original's position_read clamp was unreachable dead code */
   if (pd->position_write > limit)
     pd->position_write = limit;

   _efl_io_queue_adjust_and_realloc_if_needed(o, pd);
   efl_event_callback_call(o, EFL_IO_QUEUE_EVENT_SLICE_CHANGED, NULL);

 end:
   _efl_io_queue_update_cans(o, pd);
}

/* Return the configured maximum capacity; 0 means unlimited. */
EOLIAN static size_t
_efl_io_queue_limit_get(Eo *o EINA_UNUSED, Efl_Io_Queue_Data *pd)
{
   return pd->limit;
}

/* Return the number of buffered bytes not yet consumed by reads. */
EOLIAN static size_t
_efl_io_queue_usage_get(Eo *o EINA_UNUSED, Efl_Io_Queue_Data *pd)
{
   return pd->position_write - pd->position_read;
}

/* Expose the pending (unread) region as a read-only slice pointing
 * into the internal buffer; valid only until the next queue operation.
 * Returns EINA_FALSE (leaving 'slice' untouched) on closed queues.
 * Fix: validate BEFORE populating 'slice' — the original wrote a
 * pointer into the already-discarded buffer before the closed check. */
EOLIAN static Eina_Bool
_efl_io_queue_slice_get(Eo *o, Efl_Io_Queue_Data *pd, Eina_Slice *slice)
{
   EINA_SAFETY_ON_TRUE_RETURN_VAL(efl_io_closer_closed_get(o), EINA_FALSE);
   if (slice)
     {
        slice->mem = pd->bytes + pd->position_read;
        slice->len = efl_io_queue_usage_get(o);
     }
   return EINA_TRUE;
}

/* Discard all buffered data (the allocation itself is kept) and emit
 * SLICE_CHANGED. A deferred EOS becomes effective now that the queue
 * is empty. */
EOLIAN static void
_efl_io_queue_clear(Eo *o, Efl_Io_Queue_Data *pd)
{
   pd->position_write = 0;
   pd->position_read = 0;
   efl_io_reader_can_read_set(o, EINA_FALSE);
   efl_event_callback_call(o, EFL_IO_QUEUE_EVENT_SLICE_CHANGED, NULL);
   if (pd->pending_eos) efl_io_reader_eos_set(o, EINA_TRUE);
}

/* Request end-of-stream: effective immediately when the queue is
 * empty, otherwise deferred until readers drain the remaining data. */
EOLIAN static void
_efl_io_queue_eos_mark(Eo *o, Efl_Io_Queue_Data *pd)
{
   if (pd->eos) return;

   if (efl_io_queue_usage_get(o) == 0)
     efl_io_reader_eos_set(o, EINA_TRUE);
   else
     pd->pending_eos = EINA_TRUE;
}

/* Finalize construction and seed the can_read/can_write flags from the
 * initial (empty) state so their events reflect reality from the start.
 * Fix: 'pd' was tagged EINA_UNUSED although it is passed to
 * _efl_io_queue_update_cans(). */
EOLIAN static Efl_Object *
_efl_io_queue_efl_object_finalize(Eo *o, Efl_Io_Queue_Data *pd)
{
   o = efl_finalize(efl_super(o, MY_CLASS));
   if (!o) return NULL;

   _efl_io_queue_update_cans(o, pd);

   return o;
}

/* Destructor: close the queue if still open (emitting the usual close
 * events), chain to the parent destructor, then release the buffer.
 * Fix: dropped the redundant NULL guard around free() — free(NULL) is
 * a no-op, and the field resets are harmless when already zero. */
EOLIAN static void
_efl_io_queue_efl_object_destructor(Eo *o, Efl_Io_Queue_Data *pd)
{
   if (!efl_io_closer_closed_get(o))
     efl_io_closer_close(o);

   efl_destructor(efl_super(o, MY_CLASS));

   free(pd->bytes);
   pd->bytes = NULL;
   pd->allocated = 0;
   pd->position_read = 0;
   pd->position_write = 0;
}

/* Read up to rw_slice->len bytes from the queue head into rw_slice,
 * shrinking rw_slice->len to what was actually copied. Returns 0 on
 * success, EAGAIN when the queue is empty, EINVAL on bad arguments or
 * a closed queue. Emits SLICE_CHANGED and, when a deferred EOS drains
 * the last byte, the EOS event. */
EOLIAN static Eina_Error
_efl_io_queue_efl_io_reader_read(Eo *o, Efl_Io_Queue_Data *pd, Eina_Rw_Slice *rw_slice)
{
   Eina_Slice src;
   size_t pending;

   EINA_SAFETY_ON_NULL_RETURN_VAL(rw_slice, EINVAL);
   EINA_SAFETY_ON_TRUE_GOTO(efl_io_closer_closed_get(o), error);

   pending = pd->position_write - pd->position_read;
   if (rw_slice->len > pending)
     {
        rw_slice->len = pending;
        if (pending == 0) return EAGAIN;
     }

   src.mem = pd->bytes + pd->position_read;
   src.len = rw_slice->len;

   *rw_slice = eina_rw_slice_copy(*rw_slice, src);
   pd->position_read += src.len;

   efl_io_reader_can_read_set(o, pd->position_read < pd->position_write);
   efl_event_callback_call(o, EFL_IO_QUEUE_EVENT_SLICE_CHANGED, NULL);

   if ((pd->pending_eos) && (efl_io_queue_usage_get(o) == 0))
     efl_io_reader_eos_set(o, EINA_TRUE);

   return 0;

 error:
   rw_slice->len = 0;
   return EINVAL;
}

/* Return the cached "has readable data" flag. */
EOLIAN static Eina_Bool
_efl_io_queue_efl_io_reader_can_read_get(Eo *o EINA_UNUSED, Efl_Io_Queue_Data *pd)
{
   return pd->can_read;
}

/* Update the "has readable data" flag, emitting CAN_READ_CHANGED only
 * on an actual transition. Refused on closed queues. */
EOLIAN static void
_efl_io_queue_efl_io_reader_can_read_set(Eo *o, Efl_Io_Queue_Data *pd, Eina_Bool can_read)
{
   EINA_SAFETY_ON_TRUE_RETURN(efl_io_closer_closed_get(o));
   if (pd->can_read != can_read)
     {
        pd->can_read = can_read;
        efl_event_callback_call(o, EFL_IO_READER_EVENT_CAN_READ_CHANGED, NULL);
     }
}

/* Return whether end-of-stream was reached.
 * Fix: 'pd' was tagged EINA_UNUSED although it is dereferenced. */
EOLIAN static Eina_Bool
_efl_io_queue_efl_io_reader_eos_get(Eo *o EINA_UNUSED, Efl_Io_Queue_Data *pd)
{
   return pd->eos;
}

/* Set the end-of-stream flag; reaching EOS clears any deferred
 * (pending) EOS and emits the EOS event. Refused on closed queues.
 * Fix: 'pd' was tagged EINA_UNUSED although it is read and written. */
EOLIAN static void
_efl_io_queue_efl_io_reader_eos_set(Eo *o, Efl_Io_Queue_Data *pd, Eina_Bool is_eos)
{
   EINA_SAFETY_ON_TRUE_RETURN(efl_io_closer_closed_get(o));
   if (pd->eos == is_eos) return;
   pd->eos = is_eos;
   if (is_eos)
     {
        pd->pending_eos = EINA_FALSE;
        efl_event_callback_call(o, EFL_IO_READER_EVENT_EOS, NULL);
     }
}

/* Append 'slice' to the queue. On success returns 0, shrinks
 * slice->len to the amount actually written and reports any unwritten
 * tail through 'remaining' (NULL mem/0 len when everything fit).
 * Returns EINVAL on bad arguments or closed queue, EBADF once EOS is
 * pending, ENOSPC when the limit prevents any progress; on error
 * 'remaining' mirrors the untouched input and slice->len is zeroed. */
EOLIAN static Eina_Error
_efl_io_queue_efl_io_writer_write(Eo *o, Efl_Io_Queue_Data *pd, Eina_Slice *slice, Eina_Slice *remaining)
{
   size_t available_write, available_total, todo, limit;
   int err = EINVAL;

   EINA_SAFETY_ON_NULL_RETURN_VAL(slice, EINVAL);
   EINA_SAFETY_ON_TRUE_GOTO(efl_io_closer_closed_get(o), error);

   /* no further writes are accepted once EOS has been requested */
   err = EBADF;
   EINA_SAFETY_ON_TRUE_GOTO(pd->pending_eos, error);

   /* tail space usable without moving data */
   available_write = pd->allocated - pd->position_write;
   /* tail space plus the head slack reclaimable by compacting */
   available_total = available_write + pd->position_read;
   limit = efl_io_queue_limit_get(o);

   err = ENOSPC;
   if (available_write >= slice->len)
     {
        todo = slice->len;
     }
   else if (available_total >= slice->len)
     {
        /* fits only after compacting the consumed head */
        _efl_io_queue_adjust(pd);
        todo = slice->len;
     }
   else if ((limit > 0) && (pd->allocated == limit)) goto error;
   else
     {
        /* grow the buffer (growth may be capped at 'limit' inside the
         * realloc helpers), then write as much as fits */
        _efl_io_queue_adjust(pd);
        _efl_io_queue_realloc_rounded(o, pd, pd->position_write + slice->len);
        if (pd->allocated >= pd->position_write + slice->len)
          todo = slice->len;
        else
          todo = pd->allocated - pd->position_write;

        if (todo == 0) goto error;
     }

   memcpy(pd->bytes + pd->position_write, slice->mem, todo);
   /* partial writes: expose the unwritten tail via 'remaining' */
   if (remaining)
     {
        remaining->len = slice->len - todo;
        if (remaining->len)
          remaining->mem = slice->bytes + todo;
        else
          remaining->mem = NULL;
     }
   slice->len = todo;

   pd->position_write += todo;

   _efl_io_queue_adjust_and_realloc_if_needed(o, pd);
   efl_event_callback_call(o, EFL_IO_QUEUE_EVENT_SLICE_CHANGED, NULL);
   _efl_io_queue_update_cans(o, pd);

   return 0;

 error:
   /* nothing was consumed: the whole input remains pending */
   if (remaining) *remaining = *slice;
   slice->len = 0;
   return err;
}

/* Return the cached "may accept writes" flag. */
EOLIAN static Eina_Bool
_efl_io_queue_efl_io_writer_can_write_get(Eo *o EINA_UNUSED, Efl_Io_Queue_Data *pd)
{
   return pd->can_write;
}

/* Update the "may accept writes" flag, emitting CAN_WRITE_CHANGED only
 * on an actual transition. Refused on closed queues. */
EOLIAN static void
_efl_io_queue_efl_io_writer_can_write_set(Eo *o, Efl_Io_Queue_Data *pd, Eina_Bool can_write)
{
   EINA_SAFETY_ON_TRUE_RETURN(efl_io_closer_closed_get(o));
   if (pd->can_write != can_write)
     {
        pd->can_write = can_write;
        efl_event_callback_call(o, EFL_IO_WRITER_EVENT_CAN_WRITE_CHANGED, NULL);
     }
}

/* Close the queue: mark EOS, discard buffered data and emit CLOSED.
 * Returns 0, or EINVAL when already closed. */
EOLIAN static Eina_Error
_efl_io_queue_efl_io_closer_close(Eo *o, Efl_Io_Queue_Data *pd)
{
   EINA_SAFETY_ON_TRUE_RETURN_VAL(efl_io_closer_closed_get(o), EINVAL);
   /* order matters: eos_mark() and clear() carry safety checks that
    * refuse closed objects, so 'closed' may only be set after them */
   efl_io_queue_eos_mark(o);
   efl_io_queue_clear(o);
   pd->closed = EINA_TRUE;
   efl_event_callback_call(o, EFL_IO_CLOSER_EVENT_CLOSED, NULL);
   return 0;
}

/* Return whether the queue has been closed. */
EOLIAN static Eina_Bool
_efl_io_queue_efl_io_closer_closed_get(Eo *o EINA_UNUSED, Efl_Io_Queue_Data *pd)
{
   return pd->closed;
}

#include "interfaces/efl_io_queue.eo.c"