summaryrefslogtreecommitdiff
path: root/src/lib/evas/common
diff options
context:
space:
mode:
authorYury Usishchev <y.usishchev@samsung.com>2015-04-16 19:25:29 +0200
committerCedric BAIL <cedric@osg.samsung.com>2015-05-07 09:53:09 +0200
commitbe7c7c2c77c7b61f569532be7abb07858490bae6 (patch)
tree92507059d2a2d6bf135bc54f2a76bba297a1b5c1 /src/lib/evas/common
parenta0d0c9883995e0e04979f5382fc8954941b19edc (diff)
evas: implement _op_blend_p_c_dp_neon in NEON intrinsics.
Reviewers: cedric, raster
Projects: #efl
Differential Revision: https://phab.enlightenment.org/D2366
Signed-off-by: Cedric BAIL <cedric@osg.samsung.com>
Diffstat (limited to 'src/lib/evas/common')
-rw-r--r--src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c116
1 files changed, 106 insertions, 10 deletions
diff --git a/src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c b/src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c
index c47ec7c..b1bfc25 100644
--- a/src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c
+++ b/src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c
@@ -1,3 +1,8 @@
1#ifdef BUILD_NEON
2#ifdef BUILD_NEON_INTRINSICS
3#include <arm_neon.h>
4#endif
5#endif
1/* blend pixel x color --> dst */ 6/* blend pixel x color --> dst */
2#ifdef BUILD_NEON 7#ifdef BUILD_NEON
3 8
@@ -8,16 +13,107 @@
8static void 13static void
9_op_blend_p_c_dp_neon(DATA32 * __restrict s, DATA8 *m EINA_UNUSED, DATA32 c, DATA32 * __restrict d, int l) { 14_op_blend_p_c_dp_neon(DATA32 * __restrict s, DATA8 *m EINA_UNUSED, DATA32 c, DATA32 * __restrict d, int l) {
10#ifdef BUILD_NEON_INTRINSICS 15#ifdef BUILD_NEON_INTRINSICS
11 DATA32 *e; 16 uint16x8_t ad0_16x8;
12 int alpha; 17 uint16x8_t ad1_16x8;
13 UNROLL8_PLD_WHILE(d, l, e, 18 uint16x8_t sc0_16x8;
14 { 19 uint16x8_t sc1_16x8;
15 DATA32 sc = MUL4_SYM(c, *s); 20 uint16x8_t x255_16x8;
16 alpha = 256 - (sc >> 24); 21 uint32x2_t c_32x2;
17 *d = sc + MUL_256(alpha, *d); 22 uint32x4_t ad_32x4;
18 d++; 23 uint32x4_t alpha_32x4;
19 s++; 24 uint32x4_t cond_32x4;
20 }); 25 uint32x4_t d_32x4;
26 uint32x4_t s_32x4;
27 uint32x4_t sc_32x4;
28 uint32x4_t x0_32x4;
29 uint32x4_t x1_32x4;
30 uint8x16_t ad_8x16;
31 uint8x16_t alpha_8x16;
32 uint8x16_t d_8x16;
33 uint8x16_t s_8x16;
34 uint8x16_t sc_8x16;
35 uint8x16_t x0_8x16;
36 uint8x16_t x1_8x16;
37 uint8x8_t ad0_8x8;
38 uint8x8_t ad1_8x8;
39 uint8x8_t alpha0_8x8;
40 uint8x8_t alpha1_8x8;
41 uint8x8_t c_8x8;
42 uint8x8_t d0_8x8;
43 uint8x8_t d1_8x8;
44 uint8x8_t s0_8x8;
45 uint8x8_t s1_8x8;
46 uint8x8_t sc0_8x8;
47 uint8x8_t sc1_8x8;
48
49 c_32x2 = vdup_n_u32(c);
50 c_8x8 = vreinterpret_u8_u32(c_32x2);
51 x255_16x8 = vdupq_n_u16(0xff);
52 x0_8x16 = vdupq_n_u8(0x0);
53 x0_32x4 = vreinterpretq_u32_u8(x0_8x16);
54 x1_8x16 = vdupq_n_u8(0x1);
55 x1_32x4 = vreinterpretq_u32_u8(x1_8x16);
56 DATA32 *start = d;
57 int size = l;
58 DATA32 *end = start + (size & ~3);
59 while (start < end)
60 {
61
62 s_32x4 = vld1q_u32(s);
63 s_8x16 = vreinterpretq_u8_u32(s_32x4);
64
65 d_32x4 = vld1q_u32(start);
66 d_8x16 = vreinterpretq_u8_u32(d_32x4);
67 d0_8x8 = vget_low_u8(d_8x16);
68 d1_8x8 = vget_high_u8(d_8x16);
69
70 s0_8x8 = vget_low_u8(s_8x16);
71 s1_8x8 = vget_high_u8(s_8x16);
72
73 sc0_16x8 = vmull_u8(s0_8x8, c_8x8);
74 sc1_16x8 = vmull_u8(s1_8x8, c_8x8);
75 sc0_16x8 = vaddq_u16(sc0_16x8, x255_16x8);
76 sc1_16x8 = vaddq_u16(sc1_16x8, x255_16x8);
77 sc0_8x8 = vshrn_n_u16(sc0_16x8, 8);
78 sc1_8x8 = vshrn_n_u16(sc1_16x8, 8);
79 sc_8x16 = vcombine_u8(sc0_8x8, sc1_8x8);
80
81 alpha_32x4 = vreinterpretq_u32_u8(sc_8x16);
82 alpha_32x4 = vshrq_n_u32(alpha_32x4, 24);
83 alpha_32x4 = vmulq_u32(x1_32x4, alpha_32x4);
84 alpha_8x16 = vreinterpretq_u8_u32(alpha_32x4);
85 alpha_8x16 = vsubq_u8(x0_8x16, alpha_8x16);
86 alpha0_8x8 = vget_low_u8(alpha_8x16);
87 alpha1_8x8 = vget_high_u8(alpha_8x16);
88
89 ad0_16x8 = vmull_u8(alpha0_8x8, d0_8x8);
90 ad1_16x8 = vmull_u8(alpha1_8x8, d1_8x8);
91 ad0_8x8 = vshrn_n_u16(ad0_16x8,8);
92 ad1_8x8 = vshrn_n_u16(ad1_16x8,8);
93 ad_8x16 = vcombine_u8(ad0_8x8, ad1_8x8);
94 ad_32x4 = vreinterpretq_u32_u8(ad_8x16);
95
96 alpha_32x4 = vreinterpretq_u32_u8(alpha_8x16);
97 cond_32x4 = vceqq_u32(alpha_32x4, x0_32x4);
98 ad_32x4 = vbslq_u32(cond_32x4, d_32x4 , ad_32x4);
99
100 sc_32x4 = vreinterpretq_u32_u8(sc_8x16);
101 d_32x4 = vaddq_u32(sc_32x4, ad_32x4);
102
103 vst1q_u32(start, d_32x4);
104
105 s+=4;
106 start+=4;
107 }
108 end += (size & 3);
109 while (start < end)
110 {
111 DATA32 sc = MUL4_SYM(c, *s);
112 DATA32 alpha = 256 - (sc >> 24);
113 *start = sc + MUL_256(alpha, *start);
114 start++;
115 s++;
116 }
21#else 117#else
22#define AP "blend_p_c_dp_" 118#define AP "blend_p_c_dp_"
23 asm volatile ( 119 asm volatile (