#ifndef INCLUDED_volk_16i_branch_4_state_8_a_H
#define INCLUDED_volk_16i_branch_4_state_8_a_H

#include <inttypes.h>
#include <stdio.h>

#ifdef LV_HAVE_SSSE3

#include <emmintrin.h>
#include <tmmintrin.h>
#include <xmmintrin.h>

static inline void volk_16i_branch_4_state_8_a_ssse3(short* target,
                                                     short* src0,
                                                     char** permuters,
                                                     short* cntl2,
                                                     short* cntl3,
                                                     short* scalars)
{
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10,
        xmm11;
    __m128i *p_target, *p_src0, *p_cntl2, *p_cntl3, *p_scalars;

    p_target = (__m128i*)target;
    p_src0 = (__m128i*)src0;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;

    xmm0 = _mm_load_si128(p_scalars);
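
    /* Broadcast each of the four 16-bit scalars across a full vector:
       shufflelo replicates one word through the low 64 bits, then
       shuffle_epi32 copies that quad into the upper half as well. */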
    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);
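
    /* Load the four 16-byte shuffle masks, one per output branch. */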
    xmm0 = _mm_load_si128((__m128i*)permuters[0]);
    xmm6 = _mm_load_si128((__m128i*)permuters[1]);
    xmm8 = _mm_load_si128((__m128i*)permuters[2]);
    xmm10 = _mm_load_si128((__m128i*)permuters[3]);
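
    /* Load the eight input metrics and permute their bytes into the
       order each branch expects. */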
    xmm5 = _mm_load_si128(p_src0);
    xmm0 = _mm_shuffle_epi8(xmm5, xmm0);
    xmm6 = _mm_shuffle_epi8(xmm5, xmm6);
    xmm8 = _mm_shuffle_epi8(xmm5, xmm8);
    xmm10 = _mm_shuffle_epi8(xmm5, xmm10);
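
    /* Pre-combine the branch scalars: branch 0 takes scalars[0] +
       scalars[1], branch 1 takes scalars[1], branch 2 takes scalars[0],
       and branch 3 takes neither. */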
    xmm5 = _mm_add_epi16(xmm1, xmm2);

    xmm6 = _mm_add_epi16(xmm2, xmm6);
    xmm8 = _mm_add_epi16(xmm1, xmm8);
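
    /* The cntl2/cntl3 words act as element-wise masks: ANDing them with
       the broadcast scalars[2] and scalars[3] keeps or clears each term. */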
    xmm7 = _mm_load_si128(p_cntl2);
    xmm9 = _mm_load_si128(p_cntl3);

    xmm0 = _mm_add_epi16(xmm5, xmm0);

    xmm7 = _mm_and_si128(xmm7, xmm3);
    xmm9 = _mm_and_si128(xmm9, xmm4);
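
    /* Control loads for the next branch are interleaved with the
       arithmetic, presumably to hide load latency. */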
    xmm5 = _mm_load_si128(&p_cntl2[1]);
    xmm11 = _mm_load_si128(&p_cntl3[1]);

    xmm7 = _mm_add_epi16(xmm7, xmm9);

    xmm5 = _mm_and_si128(xmm5, xmm3);
    xmm11 = _mm_and_si128(xmm11, xmm4);
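
    /* Fold the combined masked controls into branch 0; xmm0 is now
       target[0..7]. */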
    xmm0 = _mm_add_epi16(xmm0, xmm7);

    xmm7 = _mm_load_si128(&p_cntl2[2]);
    xmm9 = _mm_load_si128(&p_cntl3[2]);

    xmm5 = _mm_add_epi16(xmm5, xmm11);

    xmm7 = _mm_and_si128(xmm7, xmm3);
    xmm9 = _mm_and_si128(xmm9, xmm4);
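
    /* Branch 1 picks up its combined control terms; xmm6 becomes
       target[8..15]. */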
    xmm6 = _mm_add_epi16(xmm6, xmm5);

    xmm5 = _mm_load_si128(&p_cntl2[3]);
    xmm11 = _mm_load_si128(&p_cntl3[3]);

    xmm7 = _mm_add_epi16(xmm7, xmm9);

    xmm5 = _mm_and_si128(xmm5, xmm3);
    xmm11 = _mm_and_si128(xmm11, xmm4);
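
    /* Finish branch 2 in xmm8 and combine the masked controls for
       branch 3. */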
    xmm8 = _mm_add_epi16(xmm8, xmm7);

    xmm5 = _mm_add_epi16(xmm5, xmm11);
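
    /* Write the four branch vectors back to target; the final add for
       branch 3 is slotted between the stores. */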
    _mm_store_si128(p_target, xmm0);
    _mm_store_si128(&p_target[1], xmm6);

    xmm10 = _mm_add_epi16(xmm5, xmm10);

    _mm_store_si128(&p_target[2], xmm8);

    _mm_store_si128(&p_target[3], xmm10);
}

#endif /*LV_HAVE_SSSE3*/


#ifdef LV_HAVE_GENERIC

static inline void volk_16i_branch_4_state_8_generic(short* target,
                                                     short* src0,
                                                     char** permuters,
                                                     short* cntl2,
                                                     short* cntl3,
                                                     short* scalars)
{
    int i = 0;

    int bound = 4; /* four branches of eight states each */

    for (; i < bound; ++i) {
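        /* For branch i, scalars[0] applies to even branches, scalars[1]
           to the first two, and the control words mask scalars[2] and
           scalars[3] element-wise, matching the SSSE3 path above. */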
        target[i * 8] = src0[((char)permuters[i][0]) / 2] + ((i + 1) % 2 * scalars[0]) +
                        (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8] & scalars[2]) +
                        (cntl3[i * 8] & scalars[3]);
        target[i * 8 + 1] = src0[((char)permuters[i][1 * 2]) / 2] +
                            ((i + 1) % 2 * scalars[0]) + (((i >> 1) ^ 1) * scalars[1]) +
                            (cntl2[i * 8 + 1] & scalars[2]) +
                            (cntl3[i * 8 + 1] & scalars[3]);
        target[i * 8 + 2] = src0[((char)permuters[i][2 * 2]) / 2] +
                            ((i + 1) % 2 * scalars[0]) + (((i >> 1) ^ 1) * scalars[1]) +
                            (cntl2[i * 8 + 2] & scalars[2]) +
                            (cntl3[i * 8 + 2] & scalars[3]);
        target[i * 8 + 3] = src0[((char)permuters[i][3 * 2]) / 2] +
                            ((i + 1) % 2 * scalars[0]) + (((i >> 1) ^ 1) * scalars[1]) +
                            (cntl2[i * 8 + 3] & scalars[2]) +
                            (cntl3[i * 8 + 3] & scalars[3]);
        target[i * 8 + 4] = src0[((char)permuters[i][4 * 2]) / 2] +
                            ((i + 1) % 2 * scalars[0]) + (((i >> 1) ^ 1) * scalars[1]) +
                            (cntl2[i * 8 + 4] & scalars[2]) +
                            (cntl3[i * 8 + 4] & scalars[3]);
        target[i * 8 + 5] = src0[((char)permuters[i][5 * 2]) / 2] +
                            ((i + 1) % 2 * scalars[0]) + (((i >> 1) ^ 1) * scalars[1]) +
                            (cntl2[i * 8 + 5] & scalars[2]) +
                            (cntl3[i * 8 + 5] & scalars[3]);
        target[i * 8 + 6] = src0[((char)permuters[i][6 * 2]) / 2] +
                            ((i + 1) % 2 * scalars[0]) + (((i >> 1) ^ 1) * scalars[1]) +
                            (cntl2[i * 8 + 6] & scalars[2]) +
                            (cntl3[i * 8 + 6] & scalars[3]);
        target[i * 8 + 7] = src0[((char)permuters[i][7 * 2]) / 2] +
                            ((i + 1) % 2 * scalars[0]) + (((i >> 1) ^ 1) * scalars[1]) +
                            (cntl2[i * 8 + 7] & scalars[2]) +
                            (cntl3[i * 8 + 7] & scalars[3]);
    }
}

#endif /*LV_HAVE_GENERIC*/


#endif /*INCLUDED_volk_16i_branch_4_state_8_a_H*/