Use NEON intrinsics for mapping instead of inline asm

Summary: Rewrite the inline assembly in the mapping function using NEON intrinsics.

Reviewers: raster

Differential Revision: https://phab.enlightenment.org/D1740
Carsten Haitzler 2014-12-17 15:28:50 +09:00
parent c280e2f711
commit afb7315722
2 changed files with 219 additions and 112 deletions
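
The change largely follows one pattern: each inline-asm macro sequence (FPU_NEON, VMOV_*, VZIP_NEON, INTERP_256_NEON, MUL4_SYM_NEON, ...) is replaced by arm_neon.h intrinsics that pack two ARGB32 values into one 64-bit vector, widen the 8-bit channels to 16-bit lanes with vmovl_u8, do the arithmetic on those lanes, and narrow the result back with a shift. The snippet below is a minimal standalone sketch of that pattern, not code from this commit: mul4_c/mul4_neon are made-up names, and it keys off the compiler's own __ARM_NEON macro rather than the tree's BUILD_NEON flag. It assumes little-endian byte order, as on the usual ARM targets.

/* Per-channel multiply of two ARGB32 pixels, roughly ((a * b) + 255) >> 8
 * per channel: once in plain C, once with NEON intrinsics. Illustration of
 * the asm-macro -> intrinsics rewrite only. */
#include <stdint.h>
#include <stdio.h>
#ifdef __ARM_NEON
#include <arm_neon.h>
#endif

static uint32_t mul4_c(uint32_t p, uint32_t c)
{
   uint32_t r = 0;
   int ch;
   for (ch = 0; ch < 4; ch++)
     {
        uint32_t a = (p >> (ch * 8)) & 0xff;
        uint32_t b = (c >> (ch * 8)) & 0xff;
        r |= (((a * b) + 0xff) >> 8) << (ch * 8);
     }
   return r;
}

#ifdef __ARM_NEON
static uint32_t mul4_neon(uint32_t p, uint32_t c)
{
   /* pack both pixels into one 8x8 vector: lane 0 = p, lane 1 = c */
   uint32x2_t pc_32x2 = vdup_n_u32(p);
   pc_32x2 = vset_lane_u32(c, pc_32x2, 1);
   uint8x8_t pc_8x8 = vreinterpret_u8_u32(pc_32x2);
   /* widen to 16 bits per channel so the multiply cannot overflow */
   uint16x8_t pc_16x8 = vmovl_u8(pc_8x8);
   uint16x4_t p_16x4 = vget_low_u16(pc_16x8);
   uint16x4_t c_16x4 = vget_high_u16(pc_16x8);
   /* (a * b + 255) >> 8 per channel, then narrow back to 8 bits */
   uint16x4_t r_16x4 = vmul_u16(p_16x4, c_16x4);
   r_16x4 = vadd_u16(r_16x4, vdup_n_u16(0xff));
   uint8x8_t r_8x8 = vshrn_n_u16(vcombine_u16(r_16x4, r_16x4), 8);
   return vget_lane_u32(vreinterpret_u32_u8(r_8x8), 0);
}
#endif

int main(void)
{
   uint32_t p = 0x80ff4020, c = 0xffc08040;
   printf("C   : %08x\n", (unsigned)mul4_c(p, c));
#ifdef __ARM_NEON
   printf("NEON: %08x\n", (unsigned)mul4_neon(p, c));
#endif
   return 0;
}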

File 1 of 2:

@@ -4,6 +4,9 @@
 #ifdef EVAS_CSERVE2
 #include "evas_cs2_private.h"
 #endif
+#ifdef BUILD_NEON
+#include <arm_neon.h>
+#endif
 #ifdef BUILD_MMX
 # undef SCALE_USING_MMX

File 2 of 2:

@@ -9,24 +9,63 @@
 # endif //SCALE_USING_MMX
 # ifdef SCALE_USING_NEON
-   FPU_NEON;
-   VMOV_I2R_NEON(q2, #255);
-# ifdef COLMUL
-# ifndef COLBLACK
-   // this part can be done here as c1 and c2 are constants in the cycle
-   FPU_NEON;
-   VMOV_M2R_NEON(d18, c1);
-   VEOR_NEON(q8);
-# ifndef COLSAME
-   VMOV_M2R_NEON(d19, c2);
-# endif //COLSAME
-   VZIP_NEON(q9, q8);
-# ifndef COLSAME
-   VMOV_R2R_NEON(d19, d16);
-# endif //COLSAME
-   // here we have c1 and c2 spread through q9 register
-# endif //COLBLACK
-# endif //COLMUL
+# ifndef COLBLACK
+   uint16x4_t temp_16x4;
+   uint16x4_t rv_16x4;
+   uint16x4_t val1_16x4;
+   uint16x4_t val3_16x4;
+   uint16x8_t ru_16x8;
+   uint16x8_t val1_val3_16x8;
+   uint16x8_t val2_val4_16x8;
+   uint16x8_t x255_16x8;
+   uint32x2_t res_32x2;
+   uint32x2_t val1_val3_32x2;
+   uint32x2_t val2_val4_32x2;
+   uint8x8_t val1_val3_8x8;
+   uint8x8_t val2_val4_8x8;
+   x255_16x8 = vdupq_n_u16(0xff);
+# ifdef COLMUL
+   uint16x4_t x255_16x4;
+   x255_16x4 = vget_low_u16(x255_16x8);
+   uint16x4_t c1_16x4;
+# ifdef COLSAME
+   uint16x4_t c1_val3_16x4;
+   uint16x8_t c1_16x8;
+   uint16x8_t c1_val3_16x8;
+   uint32x2_t c1_32x2;
+   uint8x8_t c1_8x8;
+   uint8x8_t c1_val3_8x8;
+   c1_32x2 = vset_lane_u32(c1, c1_32x2, 0);
+   c1_8x8 = vreinterpret_u8_u32(c1_32x2);
+   c1_16x8 = vmovl_u8(c1_8x8);
+   c1_16x4 = vget_low_u16(c1_16x8);
+# else //COLSAME
+   uint16x4_t c2_16x4;
+   uint16x4_t c2_local_16x4;
+   uint16x4_t cv_16x4;
+   uint16x8_t c1_c2_16x8;
+   uint16x8_t c1_val1_16x8;
+   uint16x8_t c2_val3_16x8;
+   uint16x8_t cv_rv_16x8;
+   uint32x2_t c1_c2_32x2;
+   uint8x8_t c1_c2_8x8;
+   uint8x8_t val3_8x8;
+   uint16x8_t val3_16x8;
+   c1_c2_32x2 = vset_lane_u32(c1, c1_c2_32x2, 0);
+   c1_c2_32x2 = vset_lane_u32(c2, c1_c2_32x2, 1);
+   c1_c2_8x8 = vreinterpret_u8_u32(c1_c2_32x2);
+   c1_c2_16x8 = vmovl_u8(c1_c2_8x8);
+   c1_16x4 = vget_low_u16(c1_c2_16x8);
+   c2_16x4 = vget_high_u16(c1_c2_16x8);
+# endif //COLSAME
+# else //COLMUL
+   uint8x8_t val3_8x8;
+   uint16x8_t val3_16x8;
+# endif //COLMUL
+# endif //COLBLACK
 # endif //SCALE_USING_NEON
    while (ww > 0)
@@ -99,54 +138,83 @@
 # endif //COLMUL
    MOV_R2P(mm1, *d, mm0);
 # elif defined SCALE_USING_NEON
+   // not sure if we need this condition, but it doesn't affect the result
    if (val1 | val2 | val3 | val4)
      {
-   FPU_NEON;
-# ifdef COLMUL
-   // initialize alpha for interpolation of c1 and c2
-   VDUP_NEON(d15, cv >> 16);
-   // copy c1 and c2 as algorithm will overwrite it
-   VMOV_R2R_NEON(q6, q9);
-   cv += cd; // col
-# endif //COLMUL
-   VMOV_M2R_NEON(d8, val1);
-   VEOR_NEON(q0);
-   VMOV_M2R_NEON(d9, val3);
-   VMOV_M2R_NEON(d10, val2);
-   VEOR_NEON(q1);
-   VMOV_M2R_NEON(d11, val4);
-   VDUP_NEON(q3, ru);
-   VDUP_NEON(d14, rv);
-   VZIP_NEON(q4, q0);
-   VZIP_NEON(q5, q1);
-   VMOV_R2R_NEON(d9, d0);
-   VMOV_R2R_NEON(d11, d2);
-   // by this point we have all required data in right registers
-   // interpolate val1,val2 and val3,val4
-   INTERP_256_NEON(q3, q5, q4, q2);
+   rv_16x4 = vdup_n_u16(rv);
+   ru_16x8 = vdupq_n_u16(ru);
+   val1_val3_32x2 = vset_lane_u32(val1, val1_val3_32x2, 0);
+   val1_val3_32x2 = vset_lane_u32(val3, val1_val3_32x2, 1);
+   val2_val4_32x2 = vset_lane_u32(val2, val2_val4_32x2, 0);
+   val2_val4_32x2 = vset_lane_u32(val4, val2_val4_32x2, 1);
+   val1_val3_8x8 = vreinterpret_u8_u32(val1_val3_32x2);
+   val2_val4_8x8 = vreinterpret_u8_u32(val2_val4_32x2);
+   val2_val4_16x8 = vmovl_u8(val2_val4_8x8);
+   val1_val3_16x8 = vmovl_u8(val1_val3_8x8);
+   val2_val4_16x8 = vsubq_u16(val2_val4_16x8, val1_val3_16x8);
+   val2_val4_16x8 = vmulq_u16(val2_val4_16x8, ru_16x8);
+   val2_val4_16x8 = vshrq_n_u16(val2_val4_16x8, 8);
+   val2_val4_16x8 = vaddq_u16(val2_val4_16x8, val1_val3_16x8);
+   val2_val4_16x8 = vandq_u16(val2_val4_16x8, x255_16x8);
+   val1_16x4 = vget_low_u16(val2_val4_16x8);
+   val3_16x4 = vget_high_u16(val2_val4_16x8);
 # ifdef COLMUL
 # ifdef COLSAME
-   INTERP_256_NEON(d14, d9, d8, d4);
+   val3_16x4 = vsub_u16(val3_16x4, val1_16x4);
+   val3_16x4 = vmul_u16(val3_16x4, rv_16x4);
+   val3_16x4 = vshr_n_u16(val3_16x4, 8);
+   val3_16x4 = vadd_u16(val3_16x4, val1_16x4);
+   val3_16x4 = vand_u16(val3_16x4, x255_16x4);
+   c1_val3_16x4 = vmul_u16(c1_16x4, val3_16x4);
+   c1_val3_16x4 = vadd_u16(c1_val3_16x4, x255_16x4);
+   c1_val3_16x8 = vcombine_u16(c1_val3_16x4, temp_16x4);
+   c1_val3_8x8 = vshrn_n_u16(c1_val3_16x8, 8);
+   res_32x2 = vreinterpret_u32_u8(c1_val3_8x8);
 # else //COLSAME
-   /* move result of val3,val4 interpolation (and c1 if COLMUL is
-      defined) for next step */
-   VSWP_NEON(d9, d12);
-   /* second stage of interpolation, also here c1 and c2 are
-      interpolated */
-   INTERP_256_NEON(q7, q6, q4, q2);
+   c1_val1_16x8 = vcombine_u16(c1_16x4, val1_16x4);
+   c2_val3_16x8 = vcombine_u16(c2_16x4, val3_16x4);
+   cv_16x4 = vdup_n_u16(cv>>16);
+   cv += cd;
+   cv_rv_16x8 = vcombine_u16(cv_16x4, rv_16x4);
+   c2_val3_16x8 = vsubq_u16(c2_val3_16x8, c1_val1_16x8);
+   c2_val3_16x8 = vmulq_u16(c2_val3_16x8, cv_rv_16x8);
+   c2_val3_16x8 = vshrq_n_u16(c2_val3_16x8, 8);
+   c2_val3_16x8 = vaddq_u16(c2_val3_16x8, c1_val1_16x8);
+   c2_val3_16x8 = vandq_u16(c2_val3_16x8, x255_16x8);
+   c2_local_16x4 = vget_low_u16(c2_val3_16x8);
+   val3_16x4 = vget_high_u16(c2_val3_16x8);
+   val3_16x4 = vmul_u16(c2_local_16x4, val3_16x4);
+   val3_16x4 = vadd_u16(val3_16x4, x255_16x4);
+   val3_16x8 = vcombine_u16(val3_16x4, temp_16x4);
+   val3_8x8 = vshrn_n_u16(val3_16x8, 8);
+   res_32x2 = vreinterpret_u32_u8(val3_8x8);
 # endif //COLSAME
 # else //COLMUL
-   INTERP_256_NEON(d14, d9, d8, d4);
+   val3_16x4 = vsub_u16(val3_16x4, val1_16x4);
+   val3_16x4 = vmul_u16(val3_16x4, rv_16x4);
+   val3_16x4 = vshr_n_u16(val3_16x4, 8);
+   val3_16x4 = vadd_u16(val3_16x4, val1_16x4);
+   val3_16x8 = vcombine_u16(val3_16x4, temp_16x4);
+   val3_8x8 = vmovn_u16(val3_16x8);
+   res_32x2 = vreinterpret_u32_u8(val3_8x8);
 # endif //COLMUL
-# ifdef COLMUL
-# ifdef COLSAME
-   MUL4_SYM_NEON(d8, d12, d4);
-# else //COLSAME
-   MUL4_SYM_NEON(d8, d9, d4); // do required multiplication
-# endif //COLSAME
-# endif //COLMUL
-   VMOV_R2M_NEON(q4, d8, d); // save result to d
+   vst1_lane_u32(d, res_32x2, 0);
      }
    else
      *d = val1;
@@ -177,79 +245,115 @@
 #else //SMOOTH
   {
 # ifdef SCALE_USING_NEON
-# ifdef COLMUL
-# ifndef COLBLACK
+# ifndef COLBLACK
+# ifdef COLMUL
+   uint16x4_t x255_16x4;
+   uint16x4_t temp_16x4;
+   uint16x8_t cval_16x8;
+   uint32x2_t res_32x2;
+   uint8x8_t cval_8x8;
+   uint16x4_t c1_16x4;
+   uint16x4_t cval_16x4;
+   uint16x4_t val1_16x4;
+   uint32x2_t val1_32x2;
+   uint8x8_t val1_8x8;
+   x255_16x4 = vdup_n_u16(0xff);
 # ifdef COLSAME
-   FPU_NEON;
-   VMOV_I2R_NEON(q2, #255);
-   VMOV_M2R_NEON(d10, c1);
-   VEOR_NEON(d0);
-   VZIP_NEON(d10, d0);
-# else
-   // c1 and c2 are constants inside the cycle
-   FPU_NEON;
-   VMOV_I2R_NEON(q2, #255);
-   VMOV_M2R_NEON(d10, c1);
-   VEOR_NEON(q0);
-   VMOV_M2R_NEON(d11, c2);
-   VZIP_NEON(q5, q0);
-   VMOV_R2R_NEON(d11, d0);
+   uint16x8_t c1_16x8;
+   uint16x8_t val1_16x8;
+   uint32x2_t c1_32x2;
+   uint8x8_t c1_8x8;
+   c1_32x2 = vset_lane_u32(c1, c1_32x2, 0);
+   c1_8x8 = vreinterpret_u8_u32(c1_32x2);
+   c1_16x8 = vmovl_u8(c1_8x8);
+   c1_16x4 = vget_low_u16(c1_16x8);
+# else //COLSAME
+   uint16x4_t c2_16x4;
+   uint16x4_t c2_c1_16x4;
+   uint16x4_t c2_c1_local_16x4;
+   uint16x4_t cv_16x4;
+   uint16x8_t c1_c2_16x8;
+   uint16x8_t val1_16x8;
+   uint32x2_t c1_c2_32x2;
+   uint8x8_t c1_c2_8x8;
+   c1_c2_32x2 = vset_lane_u32(c1, c1_c2_32x2, 0);
+   c1_c2_32x2 = vset_lane_u32(c2, c1_c2_32x2, 1);
+   c1_c2_8x8 = vreinterpret_u8_u32(c1_c2_32x2);
+   c1_c2_16x8 = vmovl_u8(c1_c2_8x8);
+   c1_16x4 = vget_low_u16(c1_c2_16x8);
+   c2_16x4 = vget_high_u16(c1_c2_16x8);
+   c2_c1_16x4 = vsub_u16(c2_16x4, c1_16x4);
 # endif //COLSAME
-# endif //COLBLACK
-# endif //COLMUL
+# endif //COLMUL
+# endif //COLBLACK
 # endif //SCALE_USING_NEON
    while (ww > 0)
      {
-# ifdef COLMUL
-# ifndef COLBLACK
-   DATA32 val1;
-# ifdef COLSAME
-# else
-   DATA32 cval; // col
+# ifndef SCALE_USING_NEON
+# ifdef COLMUL
+# ifndef COLBLACK
+   DATA32 val1;
+# ifndef COLSAME
+   DATA32 cval; // col
 # endif //COLSAME
 # endif //COLBLACK
 # endif //COLMUL
+# endif //SCALE_USING_NEON
 # ifdef COLBLACK
    *d = 0xff000000; // col
 # else //COLBLACK
    s = sp + ((v >> (FP + FPI)) * sw) + (u >> (FP + FPI));
 # ifdef COLMUL
+# ifdef SCALE_USING_NEON
+# ifdef COLSAME
+   val1_32x2 = vset_lane_u32(*s, val1_32x2, 0);
+   val1_8x8 = vreinterpret_u8_u32(val1_32x2);
+   val1_16x8 = vmovl_u8(val1_8x8);
+   val1_16x4 = vget_low_u16(val1_16x8);
+   cval_16x4 = c1_16x4;
+# else //COLSAME
+   cv_16x4 = vdup_n_u16(cv>>16);
+   cv += cd; // col
+   c2_c1_local_16x4 = vmul_u16(c2_c1_16x4, cv_16x4);
+   c2_c1_local_16x4 = vshr_n_u16(c2_c1_local_16x4, 8);
+   c2_c1_local_16x4 = vadd_u16(c2_c1_local_16x4, c1_16x4);
+   cval_16x4 = vand_u16(c2_c1_local_16x4, x255_16x4);
+   val1_32x2 = vset_lane_u32(*s, val1_32x2, 0);
+   val1_8x8 = vreinterpret_u8_u32(val1_32x2);
+   val1_16x8 = vmovl_u8(val1_8x8);
+   val1_16x4 = vget_low_u16(val1_16x8);
+# endif //COLSAME
+   cval_16x4 = vmul_u16(cval_16x4, val1_16x4);
+   cval_16x4 = vadd_u16(cval_16x4, x255_16x4);
+   cval_16x8 = vcombine_u16(cval_16x4, temp_16x4);
+   cval_8x8 = vshrn_n_u16(cval_16x8, 8);
+   res_32x2 = vreinterpret_u32_u8(cval_8x8);
+   vst1_lane_u32(d, res_32x2, 0);
+# else //SCALE_USING_NEON
    val1 = *s; // col
 # ifdef COLSAME
-# ifdef SCALE_USING_NEON
-   VMOV_M2R_NEON(d1, val1);
-   VEOR_NEON(d0);
-   VZIP_NEON(d1, d0);
-   VMOV_R2R_NEON(d0, d10);
-   MUL4_SYM_NEON(d0, d1, d4)
-   VMOV_R2M_NEON(q0, d0, d);
-# else
    *d = MUL4_SYM(c1, val1);
-# endif //SCALE_USING_NEON
-# else //COLSAME
-   /* XXX: this neon is broken! :( FIXME
-# ifdef SCALE_USING_NEON
-   FPU_NEON;
-   VMOV_M2R_NEON(d12, val1);
-   VMOV_R2R_NEON(q4, q5);
-   VEOR_NEON(q1);
-   VDUP_NEON(d15, cv >> 16);
-   VZIP_NEON(q6, q1);
-   INTERP_256_NEON(d15, d9, d8, d4); // interpolate c1 and c2
-   MUL4_SYM_NEON(d8, d12, d4); // multiply
-   VMOV_R2M_NEON(q4, d8, d); // save result
 # else
-   */
    cval = INTERP_256((cv >> 16), c2, c1); // col
-   val1 = MUL4_SYM(cval, val1);
+   *d = MUL4_SYM(cval, val1);
    cv += cd; // col
-   /*
 # endif
-   */
-# endif //COLSAME
-# else //COLMUL
+# endif
+# else
    *d = *s;
 # endif //COLMUL
    u += ud;
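
For reference, here is my reading of the per-channel arithmetic that the intrinsic sequences in the smooth path implement: two horizontal lerps between the four neighbouring texels weighted by ru, one vertical lerp weighted by rv, and, when COLMUL is in effect, a final per-channel multiply by the colour. The scalar sketch below is only an illustration under the assumption that ru, rv and the colour channels are 8-bit values; it is not code from the tree. Passing 0xffffffff as col reduces it to plain bilinear filtering.

/* Scalar sketch of the per-channel math mirrored by the NEON sequences
 * above. The unsigned wraparound of (b - a) followed by the final & 0xff
 * plays the same role as the vsubq/vmulq/vshrq/vandq chain in the diff. */
#include <stdint.h>

static uint32_t bilerp_mul_sketch(uint32_t val1, uint32_t val2,
                                  uint32_t val3, uint32_t val4,
                                  uint32_t ru, uint32_t rv, uint32_t col)
{
   uint32_t out = 0;
   int ch;
   for (ch = 0; ch < 4; ch++)
     {
        uint32_t shift = ch * 8;
        uint32_t a = (val1 >> shift) & 0xff, b = (val2 >> shift) & 0xff;
        uint32_t c = (val3 >> shift) & 0xff, e = (val4 >> shift) & 0xff;
        uint32_t k = (col  >> shift) & 0xff;
        /* horizontal lerp of the top and bottom texel pairs */
        uint32_t top = (a + (((b - a) * ru) >> 8)) & 0xff;
        uint32_t bot = (c + (((e - c) * ru) >> 8)) & 0xff;
        /* vertical lerp between the two horizontal results */
        uint32_t v = (top + (((bot - top) * rv) >> 8)) & 0xff;
        /* COLMUL step: per-channel multiply by the colour, (v*k + 255) >> 8 */
        out |= (((v * k) + 0xff) >> 8) << shift;
     }
   return out;
}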