summaryrefslogtreecommitdiff
path: root/src/lib/ecore_wl2
diff options
context:
space:
mode:
authorDerek Foreman <derek.foreman.samsung@gmail.com>2018-09-10 10:34:30 -0500
committerMarcel Hollerbach <mail@marcel-hollerbach.de>2018-09-11 09:54:49 +0200
commitb43033e39397d3c9126bfd95e51dba4f91ec54db (patch)
tree52ffcfd284d42bb1ddae341e25d251f23fa87b01 /src/lib/ecore_wl2
parent0d506bf346bf0815fe4ce2b1d7a6471187d1e4bf (diff)
ecore_wl2: Move surface module file to modules directory
It is now similar to how other modules are handled. Differential Revision: https://phab.enlightenment.org/D7016 Signed-off-by: Derek Foreman <derek.foreman.samsung@gmail.com>
Diffstat (limited to 'src/lib/ecore_wl2')
-rw-r--r--src/lib/ecore_wl2/ecore_wl2_surface_module_dmabuf.c269
1 file changed, 0 insertions, 269 deletions
diff --git a/src/lib/ecore_wl2/ecore_wl2_surface_module_dmabuf.c b/src/lib/ecore_wl2/ecore_wl2_surface_module_dmabuf.c
deleted file mode 100644
index 02e77f97c5..0000000000
--- a/src/lib/ecore_wl2/ecore_wl2_surface_module_dmabuf.c
+++ /dev/null
@@ -1,269 +0,0 @@
1#ifdef HAVE_CONFIG_H
2# include <config.h>
3#endif
4
5#include "Ecore_Wl2.h"
6
7#include <sys/types.h>
8#include <sys/stat.h>
9
10#include "linux-dmabuf-unstable-v1-client-protocol.h"
11
12#define MAX_BUFFERS 4
13#define QUEUE_TRIM_DURATION 100
14
15int ECORE_WL2_SURFACE_DMABUF = 0;
16
17typedef struct _Ecore_Wl2_Dmabuf_Private
18{
19 Ecore_Wl2_Buffer *current;
20 Eina_List *buffers;
21 int unused_duration;
22} Ecore_Wl2_Dmabuf_Private;
23
24static void *
25_evas_dmabuf_surface_setup(Ecore_Wl2_Window *win)
26{
27 Ecore_Wl2_Dmabuf_Private *priv;
28 Ecore_Wl2_Display *ewd;
29 Ecore_Wl2_Buffer_Type types = 0;
30
31 priv = calloc(1, sizeof(*priv));
32 if (!priv) return NULL;
33
34 ewd = ecore_wl2_window_display_get(win);
35 if (ecore_wl2_display_shm_get(ewd))
36 types |= ECORE_WL2_BUFFER_SHM;
37 if (ecore_wl2_display_dmabuf_get(ewd))
38 types |= ECORE_WL2_BUFFER_DMABUF;
39
40 if (!ecore_wl2_buffer_init(ewd, types))
41 {
42 free(priv);
43 return NULL;
44 }
45
46 return priv;
47}
48
49static void
50_evas_dmabuf_surface_reconfigure(Ecore_Wl2_Surface *s EINA_UNUSED, void *priv_data, int w, int h, uint32_t flags EINA_UNUSED, Eina_Bool alpha EINA_UNUSED)
51{
52 Ecore_Wl2_Dmabuf_Private *p;
53 Ecore_Wl2_Buffer *b;
54 Eina_List *l, *tmp;
55// Eina_Bool alpha_change;
56
57 p = priv_data;
58
59 if ((!w) || (!h)) return;
60// alpha_change = ecore_wl2_surface_alpha_get(s) != alpha;
61 EINA_LIST_FOREACH_SAFE(p->buffers, l, tmp, b)
62 {
63/* This would be nice, but requires a partial create to follow,
64 and that partial create is buffer type specific.
65
66 if (!alpha_change && ecore_wl2_buffer_fit(b, w, h))
67 continue;
68*/
69 ecore_wl2_buffer_destroy(b);
70 p->buffers = eina_list_remove_list(p->buffers, l);
71 }
72}
73
74static void *
75_evas_dmabuf_surface_data_get(Ecore_Wl2_Surface *s EINA_UNUSED, void *priv_data, int *w, int *h)
76{
77 Ecore_Wl2_Dmabuf_Private *p;
78 Ecore_Wl2_Buffer *b;
79 void *ptr;
80 int stride;
81
82 p = priv_data;
83
84 b = p->current;
85 if (!b) return NULL;
86
87 ptr = ecore_wl2_buffer_map(b, NULL, h, &stride);
88 if (!ptr) return NULL;
89
90 /* We return stride/bpp because it may not match the allocated
91 * width. evas will figure out the clipping
92 */
93 if (w) *w = stride / 4;
94
95 return ptr;
96}
97
98static Ecore_Wl2_Buffer *
99_evas_dmabuf_surface_wait(Ecore_Wl2_Surface *s, Ecore_Wl2_Dmabuf_Private *p)
100{
101 Ecore_Wl2_Buffer *b, *best = NULL;
102 Eina_List *l;
103 int best_age = -1;
104 int age;
105 int num_required = 1, num_allocated = 0;
106
107 EINA_LIST_FOREACH(p->buffers, l, b)
108 {
109 num_allocated++;
110 if (ecore_wl2_buffer_busy_get(b))
111 {
112 num_required++;
113 continue;
114 }
115 age = ecore_wl2_buffer_age_get(b);
116 if (age > best_age)
117 {
118 best = b;
119 best_age = age;
120 }
121 }
122
123 if (num_required < num_allocated)
124 p->unused_duration++;
125 else
126 p->unused_duration = 0;
127
128 /* If we've had unused buffers for longer than QUEUE_TRIM_DURATION, then
129 * destroy the oldest buffer (currently in best) and recursively call
130 * ourself to get the next oldest.
131 */
132 if (best && (p->unused_duration > QUEUE_TRIM_DURATION))
133 {
134 p->unused_duration = 0;
135 p->buffers = eina_list_remove(p->buffers, best);
136 ecore_wl2_buffer_destroy(best);
137 best = _evas_dmabuf_surface_wait(s, p);
138 }
139
140 if (!best && (eina_list_count(p->buffers) < MAX_BUFFERS))
141 {
142 best = ecore_wl2_surface_buffer_create(s);
143 /* Start at -1 so it's age is incremented to 0 for first draw */
144 ecore_wl2_buffer_age_set(best, -1);
145 p->buffers = eina_list_append(p->buffers, best);
146 }
147 return best;
148}
149
150static int
151_evas_dmabuf_surface_assign(Ecore_Wl2_Surface *s, void *priv_data)
152{
153 Ecore_Wl2_Dmabuf_Private *p;
154 Ecore_Wl2_Buffer *b;
155 Eina_List *l;
156
157 p = priv_data;
158 p->current = _evas_dmabuf_surface_wait(s, p);
159 if (!p->current)
160 {
161 /* Should be unreachable and will result in graphical
162 * anomalies - we should probably blow away all the
163 * existing buffers and start over if we actually
164 * see this happen...
165 */
166// WRN("No free DMAbuf buffers, dropping a frame");
167 EINA_LIST_FOREACH(p->buffers, l, b)
168 ecore_wl2_buffer_age_set(b, 0);
169 return 0;
170 }
171 EINA_LIST_FOREACH(p->buffers, l, b)
172 ecore_wl2_buffer_age_inc(b);
173
174 return ecore_wl2_buffer_age_get(p->current);
175}
176
177static void
178_evas_dmabuf_surface_post(Ecore_Wl2_Surface *s, void *priv_data, Eina_Rectangle *rects, unsigned int count)
179{
180 Ecore_Wl2_Dmabuf_Private *p;
181 Ecore_Wl2_Buffer *b;
182 Ecore_Wl2_Window *win;
183 struct wl_buffer *wlb;
184
185 p = priv_data;
186
187 b = p->current;
188 if (!b) return;
189
190 ecore_wl2_buffer_unlock(b);
191
192 p->current = NULL;
193 ecore_wl2_buffer_busy_set(b);
194 ecore_wl2_buffer_age_set(b, 0);
195
196 win = ecore_wl2_surface_window_get(s);
197
198 wlb = ecore_wl2_buffer_wl_buffer_get(b);
199 ecore_wl2_window_buffer_attach(win, wlb, 0, 0, EINA_FALSE);
200 ecore_wl2_window_damage(win, rects, count);
201
202 ecore_wl2_window_commit(win, EINA_TRUE);
203}
204
205static void
206_evas_dmabuf_surface_destroy(Ecore_Wl2_Surface *s EINA_UNUSED, void *priv_data)
207{
208 Ecore_Wl2_Dmabuf_Private *p;
209 Ecore_Wl2_Buffer *b;
210
211 p = priv_data;
212
213 EINA_LIST_FREE(p->buffers, b)
214 ecore_wl2_buffer_destroy(b);
215
216 free(p);
217}
218
219static void
220_evas_dmabuf_surface_flush(Ecore_Wl2_Surface *surface EINA_UNUSED, void *priv_data, Eina_Bool purge)
221{
222 Ecore_Wl2_Dmabuf_Private *p;
223 Ecore_Wl2_Buffer *b;
224
225 p = priv_data;
226
227 EINA_LIST_FREE(p->buffers, b)
228 {
229 if (purge || !ecore_wl2_buffer_busy_get(b))
230 {
231 if (p->current == b)
232 p->current = NULL;
233 ecore_wl2_buffer_destroy(b);
234 }
235 }
236}
237
238static Ecore_Wl2_Surface_Interface dmabuf_smanager =
239{
240 .version = 1,
241 .setup = _evas_dmabuf_surface_setup,
242 .destroy = _evas_dmabuf_surface_destroy,
243 .reconfigure = _evas_dmabuf_surface_reconfigure,
244 .data_get = _evas_dmabuf_surface_data_get,
245 .assign = _evas_dmabuf_surface_assign,
246 .post = _evas_dmabuf_surface_post,
247 .flush = _evas_dmabuf_surface_flush
248};
249
250Eina_Bool
251ecore_wl2_surface_module_dmabuf_init(void)
252{
253 ECORE_WL2_SURFACE_DMABUF = ecore_wl2_surface_manager_add(&dmabuf_smanager);
254
255 if (ECORE_WL2_SURFACE_DMABUF < 1)
256 return EINA_FALSE;
257
258 return EINA_TRUE;
259}
260
261void
262ecore_wl2_surface_module_dmabuf_shutdown(void)
263{
264 ecore_wl2_surface_manager_del(&dmabuf_smanager);
265}
266
267EINA_MODULE_INIT(ecore_wl2_surface_module_dmabuf_init);
268EINA_MODULE_SHUTDOWN(ecore_wl2_surface_module_dmabuf_shutdown);
269