summaryrefslogtreecommitdiff
path: root/src/lib/evas/common
diff options
context:
space:
mode:
authorCedric BAIL <cedric@osg.samsung.com>2015-04-28 23:37:37 +0200
committerCedric BAIL <cedric@osg.samsung.com>2015-05-07 09:53:11 +0200
commitd364cbdadd6a4f0d59bcdeead90205e847c84c56 (patch)
tree68ceca4d2a28281d02155cd99ef5990d63b3cd3e /src/lib/evas/common
parent76a5efe13ae76ce44d02e1f5921db9465e8a739b (diff)
evas: implement _op_blend_rel_p_c_dp_neon using NEON intrinsics
Summary: NEON intrinsics can be built both for armv7 and armv8. There was no NEON variant for this function, so it was added, with all copies registered in the init function. Reviewers: raster, cedric Reviewed By: cedric Subscribers: cedric Projects: #efl Differential Revision: https://phab.enlightenment.org/D2417 Signed-off-by: Cedric BAIL <cedric@osg.samsung.com>
Diffstat (limited to 'src/lib/evas/common')
-rw-r--r--src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c152
1 file changed, 152 insertions, 0 deletions
diff --git a/src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c b/src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c
index aec1c86..d49562a 100644
--- a/src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c
+++ b/src/lib/evas/common/evas_op_blend/op_blend_pixel_color_neon.c
@@ -808,6 +808,148 @@ init_blend_pixel_color_pt_funcs_neon(void)
808 808
809#ifdef BUILD_NEON 809#ifdef BUILD_NEON
810 810
/*
 * _op_blend_rel_p_c_dp_neon - "relative" blend of a premultiplied pixel
 * span s, modulated by color c, onto destination d, for l pixels, using
 * NEON intrinsics.  The mask pointer m is unused (EINA_UNUSED).
 *
 * Per-pixel scalar equivalent (see the tail loop at the bottom):
 *   sc    = MUL4_SYM(c, *s);
 *   alpha = 256 - (sc >> 24);
 *   *d    = MUL_SYM(*d >> 24, sc) + MUL_256(alpha, *d);
 *
 * The main loop handles 4 pixels per iteration (l & ~3); the remaining
 * l & 3 pixels are processed by the scalar tail loop.
 */
811static void
812_op_blend_rel_p_c_dp_neon(DATA32 *s, DATA8 *m EINA_UNUSED, DATA32 c, DATA32 *d, int l) {
813 uint16x8_t ad0_16x8;
814 uint16x8_t ad1_16x8;
815 uint16x8_t dsc0_16x8;
816 uint16x8_t dsc1_16x8;
817 uint16x8_t sc0_16x8;
818 uint16x8_t sc1_16x8;
819 uint16x8_t x255_16x8;
820 uint32x2_t c_32x2;
821 uint32x4_t ad_32x4;
822 uint32x4_t alpha_32x4;
823 uint32x4_t cond_32x4;
824 uint32x4_t d_32x4;
825 uint32x4_t dsc_32x4;
826 uint32x4_t s_32x4;
827 uint32x4_t x0_32x4;
828 uint32x4_t x1_32x4;
829 uint8x16_t ad_8x16;
830 uint8x16_t alpha_8x16;
831 uint8x16_t d_8x16;
832 uint8x16_t dsc_8x16;
833 uint8x16_t s_8x16;
834 uint8x16_t sc_8x16;
835 uint8x16_t x0_8x16;
836 uint8x16_t x1_8x16;
837 uint8x8_t ad0_8x8;
838 uint8x8_t ad1_8x8;
839 uint8x8_t alpha0_8x8;
840 uint8x8_t alpha1_8x8;
841 uint8x8_t c_8x8;
842 uint8x8_t d0_8x8;
843 uint8x8_t d1_8x8;
844 uint8x8_t dsc0_8x8;
845 uint8x8_t dsc1_8x8;
846 uint8x8_t s0_8x8;
847 uint8x8_t s1_8x8;
848 uint8x8_t sc0_8x8;
849 uint8x8_t sc1_8x8;
850
      // loop-invariant constants: the color replicated into both 32-bit
      // lanes of a d-register, and the 0xff / 0x00 / 0x01 byte vectors
851 c_32x2 = vdup_n_u32(c);
852 c_8x8 = vreinterpret_u8_u32(c_32x2);
853 x255_16x8 = vdupq_n_u16(0xff);
854 x0_8x16 = vdupq_n_u8(0x0);
855 x0_32x4 = vreinterpretq_u32_u8(x0_8x16);
856 x1_8x16 = vdupq_n_u8(0x1);
857 x1_32x4 = vreinterpretq_u32_u8(x1_8x16);
858
      // vectorized body: 4 pixels per iteration over the first l & ~3 pixels
859 DATA32 *end = d + (l & ~3);
860 while (d < end)
861 {
862 // load 4 elements from s
863 s_32x4 = vld1q_u32(s);
864 s_8x16 = vreinterpretq_u8_u32(s_32x4);
865 s0_8x8 = vget_low_u8(s_8x16);
866 s1_8x8 = vget_high_u8(s_8x16);
867
868 // load 4 elements from d
869 d_32x4 = vld1q_u32(d);
870 d_8x16 = vreinterpretq_u8_u32(d_32x4);
871 d0_8x8 = vget_low_u8(d_8x16);
872 d1_8x8 = vget_high_u8(d_8x16);
873
874 // multiply MUL4_SYM(c, *s);
      // per byte: (s * c + 255) >> 8, done as widening multiply,
      // add 255, then narrowing shift right by 8
875 sc0_16x8 = vmull_u8(s0_8x8, c_8x8);
876 sc1_16x8 = vmull_u8(s1_8x8, c_8x8);
877 sc0_16x8 = vaddq_u16(sc0_16x8, x255_16x8);
878 sc1_16x8 = vaddq_u16(sc1_16x8, x255_16x8);
879 sc0_8x8 = vshrn_n_u16(sc0_16x8, 8);
880 sc1_8x8 = vshrn_n_u16(sc1_16x8, 8);
881 sc_8x16 = vcombine_u8(sc0_8x8, sc1_8x8);
882
883 // calculate alpha = 256 - (sc >> 24)
      // multiplying the 32-bit alpha (0..255) by 0x01010101 replicates
      // the alpha byte into all four byte lanes of each pixel; the
      // per-byte 0 - a then yields (256 - a) & 0xff, so a source alpha
      // of 0 wraps to 0 here — that case is patched by the bsl below
884 alpha_32x4 = vreinterpretq_u32_u8(sc_8x16);
885 alpha_32x4 = vshrq_n_u32(alpha_32x4, 24);
886 alpha_32x4 = vmulq_u32(x1_32x4, alpha_32x4);
887 alpha_8x16 = vreinterpretq_u8_u32(alpha_32x4);
888 alpha_8x16 = vsubq_u8(x0_8x16, alpha_8x16);
889 alpha0_8x8 = vget_low_u8(alpha_8x16);
890 alpha1_8x8 = vget_high_u8(alpha_8x16);
891
892 // multiply MUL_256(alpha, *d);
893 ad0_16x8 = vmull_u8(alpha0_8x8, d0_8x8);
894 ad1_16x8 = vmull_u8(alpha1_8x8, d1_8x8);
895 ad0_8x8 = vshrn_n_u16(ad0_16x8,8);
896 ad1_8x8 = vshrn_n_u16(ad1_16x8,8);
897 ad_8x16 = vcombine_u8(ad0_8x8, ad1_8x8);
898 ad_32x4 = vreinterpretq_u32_u8(ad_8x16);
899
900 // select d when alpha is 0
      // a wrapped factor of 0 means source alpha was 0, i.e. the scalar
      // alpha of 256 — presumably MUL_256(256, x) == x, so d is kept
      // unchanged for those pixels (NOTE(review): verify against the
      // scalar MUL_256 macro)
901 alpha_32x4 = vreinterpretq_u32_u8(alpha_8x16);
902 cond_32x4 = vceqq_u32(alpha_32x4, x0_32x4);
903 ad_32x4 = vbslq_u32(cond_32x4, d_32x4 , ad_32x4);
904
905 // shift (*d >> 24)
      // replicate the destination alpha byte into all byte lanes
906 dsc_32x4 = vshrq_n_u32(d_32x4, 24);
907 dsc_32x4 = vmulq_u32(x1_32x4, dsc_32x4);
908 dsc_8x16 = vreinterpretq_u8_u32(dsc_32x4);
909 dsc0_8x8 = vget_low_u8(dsc_8x16);
910 dsc1_8x8 = vget_high_u8(dsc_8x16);
911
912 // multiply MUL_256(*d >> 24, sc);
      // NOTE(review): the +255 then >>8 rounding used here is the
      // MUL_SYM pattern (matching the scalar tail loop below), not the
      // MUL_256 pattern this comment names — the code, not the comment,
      // agrees with the scalar path
913 dsc0_16x8 = vmull_u8(dsc0_8x8, sc0_8x8);
914 dsc1_16x8 = vmull_u8(dsc1_8x8, sc1_8x8);
915 dsc0_16x8 = vaddq_u16(dsc0_16x8, x255_16x8);
916 dsc1_16x8 = vaddq_u16(dsc1_16x8, x255_16x8);
917 dsc0_8x8 = vshrn_n_u16(dsc0_16x8, 8);
918 dsc1_8x8 = vshrn_n_u16(dsc1_16x8, 8);
919 dsc_8x16 = vcombine_u8(dsc0_8x8, dsc1_8x8);
920
921 // add up everything
922 dsc_32x4 = vreinterpretq_u32_u8(dsc_8x16);
923 d_32x4 = vaddq_u32(dsc_32x4, ad_32x4);
924
925 // save result
926 vst1q_u32(d, d_32x4);
927
928 d+=4;
929 s+=4;
930 }
931
      // scalar tail: the remaining l & 3 pixels, using the reference
      // per-pixel formula
932 end += (l & 3);
933 int alpha;
934 while (d < end)
935 {
936 DATA32 sc = MUL4_SYM(c, *s);
937 alpha = 256 - (sc >> 24);
938 *d = MUL_SYM(*d >> 24, sc) + MUL_256(alpha, *d);
939 d++;
940 s++;
941 }
942}
943
/*
 * No specialised kernels for the pixel-alpha (pas/pan) and color-alpha
 * (can/caa) relative-blend variants: they all alias the generic
 * _op_blend_rel_p_c_dp_neon implementation above.
 */
944#define _op_blend_rel_pas_c_dp_neon _op_blend_rel_p_c_dp_neon
945#define _op_blend_rel_pan_c_dp_neon _op_blend_rel_p_c_dp_neon
946#define _op_blend_rel_p_can_dp_neon _op_blend_rel_p_c_dp_neon
947#define _op_blend_rel_pas_can_dp_neon _op_blend_rel_p_c_dp_neon
948#define _op_blend_rel_pan_can_dp_neon _op_blend_rel_p_c_dp_neon
949#define _op_blend_rel_p_caa_dp_neon _op_blend_rel_p_c_dp_neon
950#define _op_blend_rel_pas_caa_dp_neon _op_blend_rel_p_c_dp_neon
951#define _op_blend_rel_pan_caa_dp_neon _op_blend_rel_p_c_dp_neon
952
811#define _op_blend_rel_p_c_dpan_neon _op_blend_p_c_dpan_neon 953#define _op_blend_rel_p_c_dpan_neon _op_blend_p_c_dpan_neon
812#define _op_blend_rel_pas_c_dpan_neon _op_blend_pas_c_dpan_neon 954#define _op_blend_rel_pas_c_dpan_neon _op_blend_pas_c_dpan_neon
813#define _op_blend_rel_pan_c_dpan_neon _op_blend_pan_c_dpan_neon 955#define _op_blend_rel_pan_c_dpan_neon _op_blend_pan_c_dpan_neon
@@ -821,6 +963,16 @@ init_blend_pixel_color_pt_funcs_neon(void)
821static void 963static void
822init_blend_rel_pixel_color_span_funcs_neon(void) 964init_blend_rel_pixel_color_span_funcs_neon(void)
823{ 965{
966 op_blend_rel_span_funcs[SP][SM_N][SC][DP][CPU_NEON] = _op_blend_rel_p_c_dp_neon;
967 op_blend_rel_span_funcs[SP_AS][SM_N][SC][DP][CPU_NEON] = _op_blend_rel_pas_c_dp_neon;
968 op_blend_rel_span_funcs[SP_AN][SM_N][SC][DP][CPU_NEON] = _op_blend_rel_pan_c_dp_neon;
969 op_blend_rel_span_funcs[SP][SM_N][SC_AN][DP][CPU_NEON] = _op_blend_rel_p_can_dp_neon;
970 op_blend_rel_span_funcs[SP_AS][SM_N][SC_AN][DP][CPU_NEON] = _op_blend_rel_pas_can_dp_neon;
971 op_blend_rel_span_funcs[SP_AN][SM_N][SC_AN][DP][CPU_NEON] = _op_blend_rel_pan_can_dp_neon;
972 op_blend_rel_span_funcs[SP][SM_N][SC_AA][DP][CPU_NEON] = _op_blend_rel_p_caa_dp_neon;
973 op_blend_rel_span_funcs[SP_AS][SM_N][SC_AA][DP][CPU_NEON] = _op_blend_rel_pas_caa_dp_neon;
974 op_blend_rel_span_funcs[SP_AN][SM_N][SC_AA][DP][CPU_NEON] = _op_blend_rel_pan_caa_dp_neon;
975
824 op_blend_rel_span_funcs[SP][SM_N][SC][DP_AN][CPU_NEON] = _op_blend_rel_p_c_dpan_neon; 976 op_blend_rel_span_funcs[SP][SM_N][SC][DP_AN][CPU_NEON] = _op_blend_rel_p_c_dpan_neon;
825 op_blend_rel_span_funcs[SP_AS][SM_N][SC][DP_AN][CPU_NEON] = _op_blend_rel_pas_c_dpan_neon; 977 op_blend_rel_span_funcs[SP_AS][SM_N][SC][DP_AN][CPU_NEON] = _op_blend_rel_pas_c_dpan_neon;
826 op_blend_rel_span_funcs[SP_AN][SM_N][SC][DP_AN][CPU_NEON] = _op_blend_rel_pan_c_dpan_neon; 978 op_blend_rel_span_funcs[SP_AN][SM_N][SC][DP_AN][CPU_NEON] = _op_blend_rel_pan_c_dpan_neon;