evas: implement _op_blend_p_dp_neon and _op_blend_pas_dp_neon in NEON intrinsics.

Reviewers: raster, cedric

Reviewed By: cedric

Subscribers: cedric

Projects: #efl

Differential Revision: https://phab.enlightenment.org/D2311
Yury Usishchev 2015-04-15 17:24:03 +02:00 committed by Cedric BAIL
parent a30481d27b
commit 9caa6a3597
1 changed file with 219 additions and 26 deletions
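
For context: both functions implement the usual premultiplied-alpha "source over destination" blend that the scalar fallback in this diff spells out, dst = src + MUL_256(256 - alpha(src), dst). A minimal scalar sketch of that operation follows; the mul_256 helper is written out here for illustration and is assumed to match evas' MUL_256 macro, not quoted from this diff:

#include <stdint.h>

typedef uint32_t DATA32;

/* Assumed equivalent of evas' MUL_256: multiply each 8-bit channel of c
 * by a (0..256) and divide by 256, handling two channel pairs at once. */
static inline DATA32
mul_256(unsigned a, DATA32 c)
{
   return ((((c >> 8) & 0x00ff00ff) * a) & 0xff00ff00) +
          ((((c & 0x00ff00ff) * a) >> 8) & 0x00ff00ff);
}

/* Scalar reference for _op_blend_p_dp_neon: src-over blend of
 * premultiplied ARGB32 pixels, one pixel per iteration. */
static void
blend_p_dp_ref(const DATA32 *s, DATA32 *d, int l)
{
   while (l--)
     {
        unsigned alpha = 256 - (*s >> 24);  /* 256 - source alpha */
        *d = *s++ + mul_256(alpha, *d);     /* d = s + d*(256-A)/256 */
        d++;
     }
}

The NEON path in the diff computes the same result eight pixels at a time, using the identity d*(256 - A) = d*(255 - A) + d so that the per-byte multiplier fits in eight bits.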

@@ -1,17 +1,121 @@
#ifdef BUILD_NEON
#ifdef BUILD_NEON_INTRINSICS
#include <arm_neon.h>
#endif
#endif
/* blend pixel --> dst */
#ifdef BUILD_NEON
static void
_op_blend_p_dp_neon(DATA32 *s, DATA8 *m, DATA32 c, DATA32 *d, int l) {
#ifdef BUILD_NEON_INTRINSICS
DATA32 *e;
int alpha;
UNROLL8_PLD_WHILE(d, l, e,
{
alpha = 256 - (*s >> 24);
*d = *s++ + MUL_256(alpha, *d);
d++;
});
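/* NEON intrinsics replacement for the scalar UNROLL8_PLD_WHILE loop
 * above: the main loop below blends 8 pixels per iteration, and a
 * scalar loop at the end handles the remainder. */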
uint16x8_t alpha00_16x8;
uint16x8_t alpha01_16x8;
uint16x8_t alpha10_16x8;
uint16x8_t alpha11_16x8;
uint16x8_t d00_16x8;
uint16x8_t d01_16x8;
uint16x8_t d10_16x8;
uint16x8_t d11_16x8;
uint32x4_t alpha0_32x4;
uint32x4_t alpha1_32x4;
uint32x4_t d0_32x4;
uint32x4_t d1_32x4;
uint32x4_t s0_32x4;
uint32x4_t s1_32x4;
uint32x4_t x1_32x4;
uint8x16_t alpha0_8x16;
uint8x16_t alpha1_8x16;
uint8x16_t d0_8x16;
uint8x16_t d1_8x16;
uint8x16_t s0_8x16;
uint8x16_t s1_8x16;
uint8x16_t x1_8x16;
uint8x16_t x255_8x16;
uint8x8_t alpha00_8x8;
uint8x8_t alpha01_8x8;
uint8x8_t alpha10_8x8;
uint8x8_t alpha11_8x8;
uint8x8_t d00_8x8;
uint8x8_t d01_8x8;
uint8x8_t d10_8x8;
uint8x8_t d11_8x8;
x1_8x16 = vdupq_n_u8(0x1);
x1_32x4 = vreinterpretq_u32_u8(x1_8x16);
x255_8x16 = vdupq_n_u8(0xff);
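/* Constants: each 32-bit lane of x1_32x4 is 0x01010101, so multiplying
 * the shifted-down alpha (0..255 per lane) by it broadcasts the alpha
 * byte to all four bytes of its pixel; x255_8x16 is 0xff in every byte
 * for the (255 - alpha) step. */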
DATA32 *start = d;
int size = l;
DATA32 *end = start + (size & ~7);
while (start < end)
{
s0_32x4 = vld1q_u32(s);
s1_32x4 = vld1q_u32(s+4);
d0_32x4 = vld1q_u32(start);
d1_32x4 = vld1q_u32(start+4);
alpha0_32x4 = vshrq_n_u32(s0_32x4, 24);
alpha1_32x4 = vshrq_n_u32(s1_32x4, 24);
alpha0_32x4 = vmulq_u32(x1_32x4, alpha0_32x4);
alpha1_32x4 = vmulq_u32(x1_32x4, alpha1_32x4);
alpha0_8x16 = vreinterpretq_u8_u32(alpha0_32x4);
alpha1_8x16 = vreinterpretq_u8_u32(alpha1_32x4);
alpha0_8x16 = vsubq_u8(x255_8x16, alpha0_8x16);
alpha1_8x16 = vsubq_u8(x255_8x16, alpha1_8x16);
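/* Every byte of alpha0/alpha1 now holds (255 - A) for its pixel. */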
alpha10_8x8 = vget_low_u8(alpha1_8x16);
alpha11_8x8 = vget_high_u8(alpha1_8x16);
alpha00_8x8 = vget_low_u8(alpha0_8x16);
alpha01_8x8 = vget_high_u8(alpha0_8x16);
d0_8x16 = vreinterpretq_u8_u32(d0_32x4);
d1_8x16 = vreinterpretq_u8_u32(d1_32x4);
d00_8x8 = vget_low_u8(d0_8x16);
d01_8x8 = vget_high_u8(d0_8x16);
d10_8x8 = vget_low_u8(d1_8x16);
d11_8x8 = vget_high_u8(d1_8x16);
alpha00_16x8 = vmull_u8(alpha00_8x8, d00_8x8);
alpha01_16x8 = vmull_u8(alpha01_8x8, d01_8x8);
alpha10_16x8 = vmull_u8(alpha10_8x8, d10_8x8);
alpha11_16x8 = vmull_u8(alpha11_8x8, d11_8x8);
d00_16x8 = vmovl_u8(d00_8x8);
d01_16x8 = vmovl_u8(d01_8x8);
d10_16x8 = vmovl_u8(d10_8x8);
d11_16x8 = vmovl_u8(d11_8x8);
alpha00_16x8 = vaddq_u16(alpha00_16x8, d00_16x8);
alpha01_16x8 = vaddq_u16(alpha01_16x8, d01_16x8);
alpha10_16x8 = vaddq_u16(alpha10_16x8, d10_16x8);
alpha11_16x8 = vaddq_u16(alpha11_16x8, d11_16x8);
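/* d*(255 - A) + d == d*(256 - A), so adding d back reproduces the
 * numerator of the scalar MUL_256(256 - alpha, *d). */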
alpha00_8x8 = vshrn_n_u16(alpha00_16x8,8);
alpha01_8x8 = vshrn_n_u16(alpha01_16x8,8);
alpha10_8x8 = vshrn_n_u16(alpha10_16x8,8);
alpha11_8x8 = vshrn_n_u16(alpha11_16x8,8);
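/* The narrowing right shift by 8 completes the division by 256. */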
alpha0_8x16 = vcombine_u8(alpha00_8x8, alpha01_8x8);
alpha1_8x16 = vcombine_u8(alpha10_8x8, alpha11_8x8);
s0_8x16 = vreinterpretq_u8_u32(s0_32x4);
s1_8x16 = vreinterpretq_u8_u32(s1_32x4);
d0_8x16 = vaddq_u8(s0_8x16, alpha0_8x16);
d1_8x16 = vaddq_u8(s1_8x16, alpha1_8x16);
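/* d = s + d*(256 - A)/256, matching the scalar path. */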
d0_32x4 = vreinterpretq_u32_u8(d0_8x16);
d1_32x4 = vreinterpretq_u32_u8(d1_8x16);
vst1q_u32(start, d0_32x4);
vst1q_u32(start+4, d1_32x4);
s+=8;
start+=8;
}
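/* Blend the remaining (l & 7) pixels one at a time. */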
end += (size & 7);
while (start < end)
{
int alpha;
alpha = 256 - (*s >> 24);
*start = *s++ + MUL_256(alpha, *start);
start++;
}
#else
#define AP "blend_p_dp_"
asm volatile (
@@ -254,24 +358,113 @@ _op_blend_p_dp_neon(DATA32 *s, DATA8 *m, DATA32 c, DATA32 *d, int l) {
static void
_op_blend_pas_dp_neon(DATA32 *s, DATA8 *m, DATA32 c, DATA32 *d, int l) {
#ifdef BUILD_NEON_INTRINSICS
DATA32 *e;
int alpha;
UNROLL8_PLD_WHILE(d, l, e,
{
switch (*s & 0xff000000)
{
case 0:
break;
case 0xff000000:
*d = *s;
break;
default:
alpha = 256 - (*s >> 24);
*d = *s + MUL_256(alpha, *d);
break;
}
s++; d++;
});
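/* The scalar version above short-circuits fully transparent (A == 0)
 * and fully opaque (A == 0xff) source pixels; the NEON path below
 * blends every pixel with the same uniform math as
 * _op_blend_p_dp_neon. */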
uint16x8_t alpha00_16x8;
uint16x8_t alpha01_16x8;
uint16x8_t alpha10_16x8;
uint16x8_t alpha11_16x8;
uint16x8_t d00_16x8;
uint16x8_t d01_16x8;
uint16x8_t d10_16x8;
uint16x8_t d11_16x8;
uint32x4_t alpha0_32x4;
uint32x4_t alpha1_32x4;
uint32x4_t d0_32x4;
uint32x4_t d1_32x4;
uint32x4_t s0_32x4;
uint32x4_t s1_32x4;
uint32x4_t x1_32x4;
uint8x16_t alpha0_8x16;
uint8x16_t alpha1_8x16;
uint8x16_t d0_8x16;
uint8x16_t d1_8x16;
uint8x16_t s0_8x16;
uint8x16_t s1_8x16;
uint8x16_t x1_8x16;
uint8x16_t x255_8x16;
uint8x8_t alpha00_8x8;
uint8x8_t alpha01_8x8;
uint8x8_t alpha10_8x8;
uint8x8_t alpha11_8x8;
uint8x8_t d00_8x8;
uint8x8_t d01_8x8;
uint8x8_t d10_8x8;
uint8x8_t d11_8x8;
x1_8x16 = vdupq_n_u8(0x1);
x1_32x4 = vreinterpretq_u32_u8(x1_8x16);
x255_8x16 = vdupq_n_u8(0xff);
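/* From here on this is identical to the 8-pixels-per-iteration loop in
 * _op_blend_p_dp_neon above: d = s + d*(256 - A)/256. */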
DATA32 *start = d;
int size = l;
DATA32 *end = start + (size & ~7);
while (start < end)
{
s0_32x4 = vld1q_u32(s);
s1_32x4 = vld1q_u32(s+4);
d0_32x4 = vld1q_u32(start);
d1_32x4 = vld1q_u32(start+4);
alpha0_32x4 = vshrq_n_u32(s0_32x4, 24);
alpha1_32x4 = vshrq_n_u32(s1_32x4, 24);
alpha0_32x4 = vmulq_u32(x1_32x4, alpha0_32x4);
alpha1_32x4 = vmulq_u32(x1_32x4, alpha1_32x4);
alpha0_8x16 = vreinterpretq_u8_u32(alpha0_32x4);
alpha1_8x16 = vreinterpretq_u8_u32(alpha1_32x4);
alpha0_8x16 = vsubq_u8(x255_8x16, alpha0_8x16);
alpha1_8x16 = vsubq_u8(x255_8x16, alpha1_8x16);
alpha10_8x8 = vget_low_u8(alpha1_8x16);
alpha11_8x8 = vget_high_u8(alpha1_8x16);
alpha00_8x8 = vget_low_u8(alpha0_8x16);
alpha01_8x8 = vget_high_u8(alpha0_8x16);
d0_8x16 = vreinterpretq_u8_u32(d0_32x4);
d1_8x16 = vreinterpretq_u8_u32(d1_32x4);
d00_8x8 = vget_low_u8(d0_8x16);
d01_8x8 = vget_high_u8(d0_8x16);
d10_8x8 = vget_low_u8(d1_8x16);
d11_8x8 = vget_high_u8(d1_8x16);
alpha00_16x8 = vmull_u8(alpha00_8x8, d00_8x8);
alpha01_16x8 = vmull_u8(alpha01_8x8, d01_8x8);
alpha10_16x8 = vmull_u8(alpha10_8x8, d10_8x8);
alpha11_16x8 = vmull_u8(alpha11_8x8, d11_8x8);
d00_16x8 = vmovl_u8(d00_8x8);
d01_16x8 = vmovl_u8(d01_8x8);
d10_16x8 = vmovl_u8(d10_8x8);
d11_16x8 = vmovl_u8(d11_8x8);
alpha00_16x8 = vaddq_u16(alpha00_16x8, d00_16x8);
alpha01_16x8 = vaddq_u16(alpha01_16x8, d01_16x8);
alpha10_16x8 = vaddq_u16(alpha10_16x8, d10_16x8);
alpha11_16x8 = vaddq_u16(alpha11_16x8, d11_16x8);
alpha00_8x8 = vshrn_n_u16(alpha00_16x8,8);
alpha01_8x8 = vshrn_n_u16(alpha01_16x8,8);
alpha10_8x8 = vshrn_n_u16(alpha10_16x8,8);
alpha11_8x8 = vshrn_n_u16(alpha11_16x8,8);
alpha0_8x16 = vcombine_u8(alpha00_8x8, alpha01_8x8);
alpha1_8x16 = vcombine_u8(alpha10_8x8, alpha11_8x8);
s0_8x16 = vreinterpretq_u8_u32(s0_32x4);
s1_8x16 = vreinterpretq_u8_u32(s1_32x4);
d0_8x16 = vaddq_u8(s0_8x16, alpha0_8x16);
d1_8x16 = vaddq_u8(s1_8x16, alpha1_8x16);
d0_32x4 = vreinterpretq_u32_u8(d0_8x16);
d1_32x4 = vreinterpretq_u32_u8(d1_8x16);
vst1q_u32(start, d0_32x4);
vst1q_u32(start+4, d1_32x4);
s+=8;
start+=8;
}
end += (size & 7);
while (start < end)
{
int alpha;
alpha = 256 - (*s >> 24);
*start = *s++ + MUL_256(alpha, *start);
start++;
}
#else
#define AP "blend_pas_dp_"
DATA32 *e = d + l,*tmp = e + 32,*pl=(void*)912;