evas: implement _op_blend_c_dp_neon in NEON intrinsics.
Reviewers: raster, cedric @feature
Reviewed By: cedric
Subscribers: jpeg, cedric
Projects: #efl
Differential Revision: https://phab.enlightenment.org/D2310
Signed-off-by: Cedric BAIL <cedric@osg.samsung.com>
This commit is contained in:
parent
71eec44ccc
commit
a30481d27b
|
@@ -1,15 +1,95 @@
|
|||
#ifdef BUILD_NEON
|
||||
#ifdef BUILD_NEON_INTRINSICS
|
||||
#include <arm_neon.h>
|
||||
#endif
|
||||
#endif
|
||||
/* blend color --> dst */
|
||||
|
||||
#ifdef BUILD_NEON
|
||||
static void
|
||||
_op_blend_c_dp_neon(DATA32 *s EINA_UNUSED, DATA8 *m EINA_UNUSED, DATA32 c, DATA32 *d, int l) {
|
||||
#ifdef BUILD_NEON_INTRINSICS
|
||||
DATA32 *e, a = 256 - (c >> 24);
|
||||
UNROLL8_PLD_WHILE(d, l, e,
|
||||
{
|
||||
*d = c + MUL_256(a, *d);
|
||||
d++;
|
||||
});
|
||||
uint16x8_t temp00_16x8;
|
||||
uint16x8_t temp01_16x8;
|
||||
uint16x8_t temp10_16x8;
|
||||
uint16x8_t temp11_16x8;
|
||||
uint32x4_t temp0_32x4;
|
||||
uint32x4_t temp1_32x4;
|
||||
uint32x4_t c_32x4;
|
||||
uint32x4_t d0_32x4;
|
||||
uint32x4_t d1_32x4;
|
||||
uint8x16_t d0_8x16;
|
||||
uint8x16_t d1_8x16;
|
||||
uint8x16_t temp0_8x16;
|
||||
uint8x16_t temp1_8x16;
|
||||
uint8x8_t alpha_8x8;
|
||||
uint8x8_t d00_8x8;
|
||||
uint8x8_t d01_8x8;
|
||||
uint8x8_t d10_8x8;
|
||||
uint8x8_t d11_8x8;
|
||||
uint8x8_t temp00_8x8;
|
||||
uint8x8_t temp01_8x8;
|
||||
uint8x8_t temp10_8x8;
|
||||
uint8x8_t temp11_8x8;
|
||||
|
||||
// alpha can only be 0 if color is 0x0. In that case we can just return.
|
||||
// Otherwise we can assume alpha != 0. This allows more optimization in
|
||||
// NEON code.
|
||||
|
||||
if(!c)
|
||||
return;
|
||||
|
||||
DATA32 *start = d;
|
||||
int size = l;
|
||||
DATA32 *end = start + (size & ~7);
|
||||
|
||||
unsigned char alpha;
|
||||
alpha = ~(c >> 24) + 1; // 256 - (c >> 24)
|
||||
alpha_8x8 = vdup_n_u8(alpha);
|
||||
|
||||
c_32x4 = vdupq_n_u32(c);
|
||||
|
||||
while (start < end)
|
||||
{
|
||||
d0_32x4 = vld1q_u32(start);
|
||||
d1_32x4 = vld1q_u32(start+4);
|
||||
d0_8x16 = vreinterpretq_u8_u32(d0_32x4);
|
||||
d1_8x16 = vreinterpretq_u8_u32(d1_32x4);
|
||||
|
||||
d00_8x8 = vget_low_u8(d0_8x16);
|
||||
d01_8x8 = vget_high_u8(d0_8x16);
|
||||
d10_8x8 = vget_low_u8(d1_8x16);
|
||||
d11_8x8 = vget_high_u8(d1_8x16);
|
||||
|
||||
temp00_16x8 = vmull_u8(alpha_8x8, d00_8x8);
|
||||
temp01_16x8 = vmull_u8(alpha_8x8, d01_8x8);
|
||||
temp10_16x8 = vmull_u8(alpha_8x8, d10_8x8);
|
||||
temp11_16x8 = vmull_u8(alpha_8x8, d11_8x8);
|
||||
|
||||
temp00_8x8 = vshrn_n_u16(temp00_16x8,8);
|
||||
temp01_8x8 = vshrn_n_u16(temp01_16x8,8);
|
||||
temp10_8x8 = vshrn_n_u16(temp10_16x8,8);
|
||||
temp11_8x8 = vshrn_n_u16(temp11_16x8,8);
|
||||
|
||||
temp0_8x16 = vcombine_u8(temp00_8x8, temp01_8x8);
|
||||
temp1_8x16 = vcombine_u8(temp10_8x8, temp11_8x8);
|
||||
|
||||
temp0_32x4 = vreinterpretq_u32_u8(temp0_8x16);
|
||||
temp1_32x4 = vreinterpretq_u32_u8(temp1_8x16);
|
||||
|
||||
d0_32x4 = vaddq_u32(c_32x4, temp0_32x4);
|
||||
d1_32x4 = vaddq_u32(c_32x4, temp1_32x4);
|
||||
|
||||
vst1q_u32(start, d0_32x4);
|
||||
vst1q_u32(start+4, d1_32x4);
|
||||
start+=8;
|
||||
}
|
||||
end += (size & 7);
|
||||
while (start < end)
|
||||
{
|
||||
*start = c + MUL_256(alpha, *start);
|
||||
start++;
|
||||
}
|
||||
#else
|
||||
DATA32 *e, *tmp = 0;
|
||||
#define AP "B_C_DP"
|
||||
|
|
Loading…
Reference in New Issue