/*
 * Implementation of core functions for GOST R 34.11-2012 using SSE2.
 *
 * Copyright (c) 2013 Cryptocom LTD.
 * This file is distributed under the same license as OpenSSL.
 *
 * Author: Alexey Degtyarev <alexey@renatasystems.org>
 */

#ifndef __GOST3411_HAS_SSE2__
# error "GOST R 34.11-2012: SSE2 not enabled"
#endif

# pragma message "Use SIMD implementation"

#include <mmintrin.h>
#include <emmintrin.h>

#ifdef __SSE3__
# include <pmmintrin.h>
#endif
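
/*
 * LO()/HI() split the 16-bit lane pulled out with _mm_extract_epi16 into its
 * low and high byte; the two bytes are used below as indices into the Ax[][]
 * lookup tables (defined elsewhere).
 */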
#define LO(v) ((unsigned char) (v))
#define HI(v) ((unsigned char) (((unsigned int) (v)) >> 8))

#ifndef __x86_64__
# define EXTRACT EXTRACT32
#else
# define EXTRACT EXTRACT64
#endif
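
/*
 * EXTRACT32 gathers one output pair through MMX registers (__m64), the only
 * 64-bit register type available on 32-bit targets; EXTRACT64 does the same
 * work in plain 64-bit general-purpose registers.
 */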

#ifndef __ICC
# define _mm_cvtsi64_m64(v) (__m64) v
# define _mm_cvtm64_si64(v) (long long) v
#endif

#ifdef __SSE3__
/*
 * "This intrinsic may perform better than _mm_loadu_si128 when
 *  the data crosses a cache line boundary."
 */
# define UMEM_READ_I128 _mm_lddqu_si128
#else /* SSE2 */
# define UMEM_READ_I128 _mm_loadu_si128
#endif

/* load 512 bits from unaligned memory */
#define ULOAD(P, xmm0, xmm1, xmm2, xmm3) { \
    const __m128i *__m128p = (const __m128i *) P; \
    xmm0 = UMEM_READ_I128(&__m128p[0]); \
    xmm1 = UMEM_READ_I128(&__m128p[1]); \
    xmm2 = UMEM_READ_I128(&__m128p[2]); \
    xmm3 = UMEM_READ_I128(&__m128p[3]); \
}
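
/*
 * When UNALIGNED_SIMD_ACCESS is set, LOAD/MEM_READ_I128/MEM_WRITE_I128 fall
 * back to the unaligned load/store intrinsics; otherwise they use the aligned
 * forms and the caller must provide 16-byte aligned buffers.
 */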

#ifdef UNALIGNED_SIMD_ACCESS

# define MEM_WRITE_I128 _mm_storeu_si128
# define MEM_READ_I128 UMEM_READ_I128
# define LOAD ULOAD

#else /* !UNALIGNED_SIMD_ACCESS */

# define MEM_WRITE_I128 _mm_store_si128
# define MEM_READ_I128 _mm_load_si128

/* load 512 bits from 16-byte aligned memory */
#define LOAD(P, xmm0, xmm1, xmm2, xmm3) { \
    const __m128i *__m128p = (const __m128i *) P; \
    xmm0 = MEM_READ_I128(&__m128p[0]); \
    xmm1 = MEM_READ_I128(&__m128p[1]); \
    xmm2 = MEM_READ_I128(&__m128p[2]); \
    xmm3 = MEM_READ_I128(&__m128p[3]); \
}
#endif /* !UNALIGNED_SIMD_ACCESS */
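
/* store 512 bits (four 128-bit lanes) back to memory through MEM_WRITE_I128 */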
#define STORE(P, xmm0, xmm1, xmm2, xmm3) { \
    __m128i *__m128p = (__m128i *) &P[0]; \
    MEM_WRITE_I128(&__m128p[0], xmm0); \
    MEM_WRITE_I128(&__m128p[1], xmm1); \
    MEM_WRITE_I128(&__m128p[2], xmm2); \
    MEM_WRITE_I128(&__m128p[3], xmm3); \
}
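
/* X128R: 512-bit XOR of two register quadruples, result in the first one */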
#define X128R(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7) { \
    xmm0 = _mm_xor_si128(xmm0, xmm4); \
    xmm1 = _mm_xor_si128(xmm1, xmm5); \
    xmm2 = _mm_xor_si128(xmm2, xmm6); \
    xmm3 = _mm_xor_si128(xmm3, xmm7); \
}
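
/* X128M: XOR a register quadruple with a 512-bit value read from memory */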
#define X128M(P, xmm0, xmm1, xmm2, xmm3) { \
    const __m128i *__m128p = (const __m128i *) &P[0]; \
    xmm0 = _mm_xor_si128(xmm0, MEM_READ_I128(&__m128p[0])); \
    xmm1 = _mm_xor_si128(xmm1, MEM_READ_I128(&__m128p[1])); \
    xmm2 = _mm_xor_si128(xmm2, MEM_READ_I128(&__m128p[2])); \
    xmm3 = _mm_xor_si128(xmm3, MEM_READ_I128(&__m128p[3])); \
}

#define _mm_xor_64(mm0, mm1) _mm_xor_si64(mm0, _mm_cvtsi64_m64(mm1))
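
/*
 * EXTRACT32/EXTRACT64 compute two adjacent 64-bit words of the table-driven
 * LPS (S-box + linear) transform and pack them into one 128-bit result:
 * 16-bit lanes `row` and `row + 4` of each input register are split into
 * bytes with LO()/HI(), every byte selects a precomputed 64-bit entry from
 * Ax[0..7][], and the selected entries are XORed together.  Ax is defined
 * elsewhere; presumably it is the usual precomputed lookup table of the
 * GOST R 34.11-2012 reference implementation.
 */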
#define EXTRACT32(row, xmm0, xmm1, xmm2, xmm3, xmm4) { \
    register unsigned short ax; \
    __m64 mm0, mm1; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm0, row + 0); \
    mm0 = _mm_cvtsi64_m64(Ax[0][LO(ax)]); \
    mm1 = _mm_cvtsi64_m64(Ax[0][HI(ax)]); \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm0, row + 4); \
    mm0 = _mm_xor_64(mm0, Ax[1][LO(ax)]); \
    mm1 = _mm_xor_64(mm1, Ax[1][HI(ax)]); \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm1, row + 0); \
    mm0 = _mm_xor_64(mm0, Ax[2][LO(ax)]); \
    mm1 = _mm_xor_64(mm1, Ax[2][HI(ax)]); \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm1, row + 4); \
    mm0 = _mm_xor_64(mm0, Ax[3][LO(ax)]); \
    mm1 = _mm_xor_64(mm1, Ax[3][HI(ax)]); \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm2, row + 0); \
    mm0 = _mm_xor_64(mm0, Ax[4][LO(ax)]); \
    mm1 = _mm_xor_64(mm1, Ax[4][HI(ax)]); \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm2, row + 4); \
    mm0 = _mm_xor_64(mm0, Ax[5][LO(ax)]); \
    mm1 = _mm_xor_64(mm1, Ax[5][HI(ax)]); \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm3, row + 0); \
    mm0 = _mm_xor_64(mm0, Ax[6][LO(ax)]); \
    mm1 = _mm_xor_64(mm1, Ax[6][HI(ax)]); \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm3, row + 4); \
    mm0 = _mm_xor_64(mm0, Ax[7][LO(ax)]); \
    mm1 = _mm_xor_64(mm1, Ax[7][HI(ax)]); \
    \
    xmm4 = _mm_set_epi64(mm1, mm0); \
}
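
/*
 * 64-bit variant of the same lookup: on x86_64 the accumulation is done in
 * two 64-bit general-purpose registers and only packed into an XMM register
 * at the end.
 */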
#define EXTRACT64(row, xmm0, xmm1, xmm2, xmm3, xmm4) { \
    __m128i tmm4; \
    register unsigned short ax; \
    register unsigned long long r0, r1; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm0, row + 0); \
    r0 = Ax[0][LO(ax)]; \
    r1 = Ax[0][HI(ax)]; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm0, row + 4); \
    r0 ^= Ax[1][LO(ax)]; \
    r1 ^= Ax[1][HI(ax)]; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm1, row + 0); \
    r0 ^= Ax[2][LO(ax)]; \
    r1 ^= Ax[2][HI(ax)]; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm1, row + 4); \
    r0 ^= Ax[3][LO(ax)]; \
    r1 ^= Ax[3][HI(ax)]; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm2, row + 0); \
    r0 ^= Ax[4][LO(ax)]; \
    r1 ^= Ax[4][HI(ax)]; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm2, row + 4); \
    r0 ^= Ax[5][LO(ax)]; \
    r1 ^= Ax[5][HI(ax)]; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm3, row + 0); \
    r0 ^= Ax[6][LO(ax)]; \
    r1 ^= Ax[6][HI(ax)]; \
    \
    ax = (unsigned short) _mm_extract_epi16(xmm3, row + 4); \
    r0 ^= Ax[7][LO(ax)]; \
    r1 ^= Ax[7][HI(ax)]; \
    \
    xmm4 = _mm_cvtsi64_si128((long long) r0); \
    tmm4 = _mm_cvtsi64_si128((long long) r1); \
    xmm4 = _mm_unpacklo_epi64(xmm4, tmm4); \
}
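
/*
 * XLPS128M: XOR the 512-bit state in xmm0..xmm3 with the 512-bit value at P
 * (a key or round constant in memory), then apply the LPS transform via four
 * EXTRACT calls.
 */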
#define XLPS128M(P, xmm0, xmm1, xmm2, xmm3) { \
    __m128i tmm0, tmm1, tmm2, tmm3; \
    X128M(P, xmm0, xmm1, xmm2, xmm3); \
    \
    EXTRACT(0, xmm0, xmm1, xmm2, xmm3, tmm0); \
    EXTRACT(1, xmm0, xmm1, xmm2, xmm3, tmm1); \
    EXTRACT(2, xmm0, xmm1, xmm2, xmm3, tmm2); \
    EXTRACT(3, xmm0, xmm1, xmm2, xmm3, tmm3); \
    \
    xmm0 = tmm0; \
    xmm1 = tmm1; \
    xmm2 = tmm2; \
    xmm3 = tmm3; \
}
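
/*
 * XLPS128R: same XOR + LPS step, but the second operand is another register
 * quadruple; the result replaces xmm4..xmm7 while xmm0..xmm3 are preserved.
 */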
#define XLPS128R(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7) { \
    __m128i tmm0, tmm1, tmm2, tmm3; \
    X128R(xmm4, xmm5, xmm6, xmm7, xmm0, xmm1, xmm2, xmm3); \
    \
    EXTRACT(0, xmm4, xmm5, xmm6, xmm7, tmm0); \
    EXTRACT(1, xmm4, xmm5, xmm6, xmm7, tmm1); \
    EXTRACT(2, xmm4, xmm5, xmm6, xmm7, tmm2); \
    EXTRACT(3, xmm4, xmm5, xmm6, xmm7, tmm3); \
    \
    xmm4 = tmm0; \
    xmm5 = tmm1; \
    xmm6 = tmm2; \
    xmm7 = tmm3; \
}
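
/*
 * ROUND128: one round of the block cipher inside the compression function.
 * The first quadruple (the round key) is advanced with the iteration
 * constant C[i] from memory, then the updated key is applied to the second
 * quadruple (the data being encrypted).  C is defined elsewhere; presumably
 * it holds the twelve 512-bit iteration constants of GOST R 34.11-2012.
 */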
#define ROUND128(i, xmm0, xmm2, xmm4, xmm6, xmm1, xmm3, xmm5, xmm7) { \
    XLPS128M((&C[i]), xmm0, xmm2, xmm4, xmm6); \
    XLPS128R(xmm0, xmm2, xmm4, xmm6, xmm1, xmm3, xmm5, xmm7); \
}
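
/*
 * Illustrative sketch, not from this header: assuming the caller keeps the
 * round key in xmm0/xmm2/xmm4/xmm6 and the data block in xmm1/xmm3/xmm5/xmm7
 * (the register roles ROUND128 expects), and with "rounds" standing in for
 * however many iterations the caller performs, the encryption core of the
 * compression function could be driven roughly as:
 *
 *     unsigned int i;
 *     for (i = 0; i < rounds; i++)
 *         ROUND128(i, xmm0, xmm2, xmm4, xmm6, xmm1, xmm3, xmm5, xmm7);
 *
 * The exact iteration count and the initial/final whitening steps belong to
 * the caller and are not shown here.
 */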