avx2.c (7.7 KB)
/*-
 * Copyright 2018 Vsevolod Stakhov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*-
 * Copyright (c) 2013-2015, Alfred Klomp
 * Copyright (c) 2018, Vsevolod Stakhov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  40. #include "config.h"
  41. #include "cryptobox.h"
  42. extern const uint8_t base64_table_dec[256];
  43. #ifdef RSPAMD_HAS_TARGET_ATTR
  44. #if defined(__GNUC__) && !defined(__clang__)
  45. #pragma GCC push_options
  46. #pragma GCC target("avx2")
  47. #endif
  48. #ifndef __SSE2__
  49. #define __SSE2__
  50. #endif
  51. #ifndef __SSE__
  52. #define __SSE__
  53. #endif
  54. #ifndef __SSE4_2__
  55. #define __SSE4_2__
  56. #endif
  57. #ifndef __SSE4_1__
  58. #define __SSE4_1__
  59. #endif
  60. #ifndef __SSEE3__
  61. #define __SSEE3__
  62. #endif
  63. #ifndef __AVX__
  64. #define __AVX__
  65. #endif
  66. #ifndef __AVX2__
  67. #define __AVX2__
  68. #endif
  69. #include <immintrin.h>
  70. #define CMPGT(s,n) _mm256_cmpgt_epi8((s), _mm256_set1_epi8(n))
  71. #define CMPEQ(s,n) _mm256_cmpeq_epi8((s), _mm256_set1_epi8(n))
  72. #define REPLACE(s,n) _mm256_and_si256((s), _mm256_set1_epi8(n))
  73. #define RANGE(s,a,b) _mm256_andnot_si256(CMPGT((s), (b)), CMPGT((s), (a) - 1))
/*
 * Repack the translated input: each byte of `in` holds a 6-bit value with
 * the two top bits zero; the result packs those into 24 contiguous bytes
 * (per the libbase64 AVX2 algorithm), with the remaining bytes zeroed by
 * the -1 shuffle indices and the final lane permute.
 */
static inline __m256i
dec_reshuffle (__m256i in) __attribute__((__target__("avx2")));

static inline __m256i
dec_reshuffle (__m256i in)
{
	// in, lower lane, bits, upper case are most significant bits, lower case are least significant bits:
	// 00llllll 00kkkkLL 00jjKKKK 00JJJJJJ
	// 00iiiiii 00hhhhII 00ggHHHH 00GGGGGG
	// 00ffffff 00eeeeFF 00ddEEEE 00DDDDDD
	// 00cccccc 00bbbbCC 00aaBBBB 00AAAAAA

	/* Merge adjacent pairs of 6-bit fields into 12-bit fields */
	const __m256i merge_ab_and_bc = _mm256_maddubs_epi16(in, _mm256_set1_epi32(0x01400140));
	// 0000kkkk LLllllll 0000JJJJ JJjjKKKK
	// 0000hhhh IIiiiiii 0000GGGG GGggHHHH
	// 0000eeee FFffffff 0000DDDD DDddEEEE
	// 0000bbbb CCcccccc 0000AAAA AAaaBBBB

	/* Merge the 12-bit fields into 24-bit fields within each 32-bit lane */
	__m256i out = _mm256_madd_epi16(merge_ab_and_bc, _mm256_set1_epi32(0x00011000));
	// 00000000 JJJJJJjj KKKKkkkk LLllllll
	// 00000000 GGGGGGgg HHHHhhhh IIiiiiii
	// 00000000 DDDDDDdd EEEEeeee FFffffff
	// 00000000 AAAAAAaa BBBBbbbb CCcccccc

	// Pack bytes together in each lane (indices of -1 zero the byte):
	out = _mm256_shuffle_epi8(out, _mm256_setr_epi8(
		2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, -1, -1, -1, -1,
		2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, -1, -1, -1, -1));
	// 00000000 00000000 00000000 00000000
	// LLllllll KKKKkkkk JJJJJJjj IIiiiiii
	// HHHHhhhh GGGGGGgg FFffffff EEEEeeee
	// DDDDDDdd CCcccccc BBBBbbbb AAAAAAaa

	// Pack lanes
	return _mm256_permutevar8x32_epi32(out, _mm256_setr_epi32(0, 1, 2, 4, 5, 6, -1, -1));
}
/*
 * Vectorised fast path: validate, translate and repack 32 base64
 * characters into 24 decoded bytes per iteration (libbase64 AVX2
 * algorithm).  On the first invalid byte it sets seen_error and breaks,
 * letting the scalar state machine locate and report the error.
 * NOTE(review): the >= 45 threshold (rather than 32) is inherited from
 * libbase64; it leaves headroom since the store writes a full 32 bytes
 * of which only 24 are kept — confirm against the callers' output
 * sizing.
 */
#define INNER_LOOP_AVX2 \
	while (inlen >= 45) { \
		__m256i str = _mm256_loadu_si256((__m256i *)c); \
		/* lut_lo/lut_hi classify each byte via its low/high nibble; \
		 * the AND of the two lookups is non-zero iff the byte is not \
		 * a valid base64 character (checked with testz below). */ \
		const __m256i lut_lo = _mm256_setr_epi8( \
			0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, \
			0x11, 0x11, 0x13, 0x1A, 0x1B, 0x1B, 0x1B, 0x1A, \
			0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, \
			0x11, 0x11, 0x13, 0x1A, 0x1B, 0x1B, 0x1B, 0x1A); \
		const __m256i lut_hi = _mm256_setr_epi8( \
			0x10, 0x10, 0x01, 0x02, 0x04, 0x08, 0x04, 0x08, \
			0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, \
			0x10, 0x10, 0x01, 0x02, 0x04, 0x08, 0x04, 0x08, \
			0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10); \
		/* lut_roll holds the per-class offset added to each ASCII byte \
		 * to obtain its 6-bit value, indexed by high nibble (with a -1 \
		 * index adjustment for '/', via eq_2F below). */ \
		const __m256i lut_roll = _mm256_setr_epi8( \
			0, 16, 19, 4, -65, -65, -71, -71, \
			0, 0, 0, 0, 0, 0, 0, 0, \
			0, 16, 19, 4, -65, -65, -71, -71, \
			0, 0, 0, 0, 0, 0, 0, 0); \
		const __m256i mask_2F = _mm256_set1_epi8(0x2f); \
		/* 0x2f preserves the four index bits and clears bit 7, the only \
		 * bits _mm256_shuffle_epi8 consults; srli_epi32 may smear bits \
		 * across byte boundaries, hence the mask. */ \
		const __m256i hi_nibbles = _mm256_and_si256(_mm256_srli_epi32(str, 4), mask_2F); \
		const __m256i lo_nibbles = _mm256_and_si256(str, mask_2F); \
		const __m256i hi = _mm256_shuffle_epi8(lut_hi, hi_nibbles); \
		const __m256i lo = _mm256_shuffle_epi8(lut_lo, lo_nibbles); \
		const __m256i eq_2F = _mm256_cmpeq_epi8(str, mask_2F); \
		const __m256i roll = _mm256_shuffle_epi8(lut_roll, _mm256_add_epi8(eq_2F, hi_nibbles)); \
		if (!_mm256_testz_si256(lo, hi)) { \
			/* Invalid character somewhere in this 32-byte chunk */ \
			seen_error = true; \
			break; \
		} \
		str = _mm256_add_epi8(str, roll); \
		str = dec_reshuffle(str); \
		_mm256_storeu_si256((__m256i *)o, str); \
		c += 32; \
		o += 24; \
		outl += 24; \
		inlen -= 32; \
	}
/*
 * Decode base64 input with an AVX2 fast path.
 *
 * in/inlen   - input characters (need not be NUL-terminated)
 * out        - destination buffer for decoded bytes
 * outlen     - out: number of bytes written, set in all cases
 *
 * Returns 1 on clean end of input (including valid '=' padding),
 * 0 if an invalid character was encountered.  Even on error, decoding
 * resumes after skipping invalid characters (see the loop at the end),
 * so *outlen may cover data decoded after the error as well.
 *
 * Table sentinels: base64_table_dec[] yields 254 for '=' and 255 for an
 * invalid character; values < 254 are 6-bit data.
 *
 * The scalar part is a Duff's-device-style state machine: the switch
 * jumps into the middle of for(;;) so decoding resumes at the correct
 * phase within a 4-character quantum (leftover = 0..3), both after the
 * SIMD loop and after error recovery via `goto repeat`.
 */
int
base64_decode_avx2 (const char *in, size_t inlen,
		unsigned char *out, size_t *outlen) __attribute__((__target__("avx2")));
int
base64_decode_avx2 (const char *in, size_t inlen,
		unsigned char *out, size_t *outlen)
{
	ssize_t ret = 0;
	const uint8_t *c = (const uint8_t *)in;
	uint8_t *o = (uint8_t *)out;
	uint8_t q, carry;       /* q: current 6-bit value; carry: bits left over for the next output byte */
	size_t outl = 0;        /* bytes written so far */
	size_t leftover = 0;    /* phase within the current 4-char quantum */
	bool seen_error = false;

repeat:
	switch (leftover) {
		for (;;) {
		case 0:
			/* Only take the SIMD fast path on clean input; after an
			 * error the scalar code pinpoints the offending byte */
			if (G_LIKELY (!seen_error)) {
				INNER_LOOP_AVX2
			}
			if (inlen-- == 0) {
				ret = 1;
				break;
			}
			if ((q = base64_table_dec[*c++]) >= 254) {
				/* '=' or invalid char is not allowed in phase 0 */
				ret = 0;
				break;
			}
			carry = q << 2;
			leftover++;
			/* fallthrough */
		case 1:
			if (inlen-- == 0) {
				ret = 1;
				break;
			}
			if ((q = base64_table_dec[*c++]) >= 254) {
				ret = 0;
				break;
			}
			*o++ = carry | (q >> 4);
			carry = q << 4;
			leftover++;
			outl++;
			/* fallthrough */
		case 2:
			if (inlen-- == 0) {
				ret = 1;
				break;
			}
			if ((q = base64_table_dec[*c++]) >= 254) {
				leftover++;
				if (q == 254) {
					/* First '=' of a "==" tail: accept only if the
					 * next char is also '=' and ends the input */
					if (inlen-- != 0) {
						leftover = 0;
						q = base64_table_dec[*c++];
						ret = ((q == 254) && (inlen == 0)) ? 1 : 0;
						break;
					}
					else {
						/* Lone '=' at end of input */
						ret = 1;
						break;
					}
				}
				else {
					leftover --;
				}
				/* If we get here, there was an error: */
				break;
			}
			*o++ = carry | (q >> 2);
			carry = q << 6;
			leftover++;
			outl++;
			/* fallthrough */
		case 3:
			if (inlen-- == 0) {
				ret = 1;
				break;
			}
			if ((q = base64_table_dec[*c++]) >= 254) {
				/*
				 * When q == 254, the input char is '='. Return 1 and EOF.
				 * When q == 255, the input char is invalid. Return 0 and EOF.
				 */
				if (q == 254 && inlen == 0) {
					ret = 1;
					leftover = 0;
				}
				else {
					ret = 0;
				}
				break;
			}
			*o++ = carry | q;
			carry = 0;
			leftover = 0;
			outl++;
		}
	}

	if (!ret && inlen > 0) {
		/* Skip to the next valid character in input */
		while (inlen > 0 && base64_table_dec[*c] >= 254) {
			c ++;
			inlen --;
		}
		if (inlen > 0) {
			/* Resume decoding at the preserved phase (leftover) */
			seen_error = false;
			goto repeat;
		}
	}

	*outlen = outl;

	return ret;
}
  254. #if defined(__GNUC__) && !defined(__clang__)
  255. #pragma GCC pop_options
  256. #endif
  257. #endif