/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This is a SIMD SHA-1 implementation. It requires the Intel(R) Supplemental
 * SSE3 instruction set extensions introduced in Intel Core Microarchitecture
 * processors. CPUs supporting Intel(R) AVX extensions will get an additional
 * boost.
 *
 * This work was inspired by the vectorized implementation of Dean Gaudet.
 * Additional information on it can be found at:
 *    http://www.arctic.org/~dean/crypto/sha1.html
 *
 * It was improved upon with more efficient vectorization of the message
 * scheduling. This implementation has also been optimized for all current and
 * several future generations of Intel CPUs.
 *
 * See this article for more information about the implementation details:
 *   http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/
 *
 * Copyright (C) 2010, Intel Corp.
 *   Authors: Maxim Locktyukhin <maxim.locktyukhin@intel.com>
 *            Ronen Zohar <ronen.zohar@intel.com>
 *
 * Converted to AT&T syntax and adapted for inclusion in the Linux kernel:
 *   Author: Mathias Krause <minipli@googlemail.com>
 */

#include <linux/linkage.h>

#define CTX %rdi // arg1
#define BUF %rsi // arg2
#define CNT %rdx // arg3

#define REG_A %ecx
#define REG_B %esi
#define REG_C %edi
#define REG_D %r12d
#define REG_E %edx

#define REG_T1 %eax
#define REG_T2 %ebx

#define K_BASE %r8
#define HASH_PTR %r9
#define BUFFER_PTR %r10
#define BUFFER_END %r11

#define W_TMP1 %xmm0
#define W_TMP2 %xmm9

#define W0 %xmm1
#define W4 %xmm2
#define W8 %xmm3
#define W12 %xmm4
#define W16 %xmm5
#define W20 %xmm6
#define W24 %xmm7
#define W28 %xmm8

#define XMM_SHUFB_BSWAP %xmm10

/* we keep a 64-byte (16-entry) window of pre-calculated w[i]+K values in a circular buffer */
#define WK(t) (((t) & 15) * 4)(%rsp)
#define W_PRECALC_AHEAD 16
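/*
 * For example, WK(16) resolves to the same slot as WK(0), i.e. 0(%rsp), and
 * WK(17) shares 4(%rsp) with WK(1): the 16 four-byte slots are reused as the
 * pre-calculation runs W_PRECALC_AHEAD rounds ahead of the round function.
 */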

/*
 * This macro implements the SHA-1 function's body, processing the input in
 * 64-byte blocks
 * param: function's name
 */
.macro SHA1_VECTOR_ASM name
	SYM_FUNC_START(\name)

	push %rbx
	push %r12
	push %rbp
	mov %rsp, %rbp

	sub $64, %rsp # allocate workspace
	and $~15, %rsp # align stack

	mov CTX, HASH_PTR
	mov BUF, BUFFER_PTR

	shl $6, CNT # multiply by 64
	add BUF, CNT
	mov CNT, BUFFER_END
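	# e.g. for arg3 == 4 blocks: CNT = 4 << 6 = 256, so BUFFER_END points
	# just past the last input block at BUF + 256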

	lea K_XMM_AR(%rip), K_BASE
	xmm_mov BSWAP_SHUFB_CTL(%rip), XMM_SHUFB_BSWAP

	SHA1_PIPELINED_MAIN_BODY

	# cleanup workspace
	mov $8, %ecx
	mov %rsp, %rdi
	xor %eax, %eax
	rep stosq

	mov %rbp, %rsp # deallocate workspace
	pop %rbp
	pop %r12
	pop %rbx
	ret

	SYM_FUNC_END(\name)
.endm

/*
 * This macro implements 80 rounds of SHA-1 for one 64-byte block
 */
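/*
 * In outline: the five working variables are loaded from the digest, the 80
 * rounds run in four groups of 20 (F1/K1, F2/K2, F3/K3, F4/K4, with the
 * per-group constants taken from K_XMM_AR), and the results are added back
 * into the digest via UPDATE_HASH before looping to the next block.
 */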
.macro SHA1_PIPELINED_MAIN_BODY
	INIT_REGALLOC

	mov (HASH_PTR), A
	mov 4(HASH_PTR), B
	mov 8(HASH_PTR), C
	mov 12(HASH_PTR), D
	mov 16(HASH_PTR), E

	.set i, 0
	.rept W_PRECALC_AHEAD
	W_PRECALC i
	.set i, (i+1)
	.endr

.align 4
1:
	RR F1,A,B,C,D,E,0
	RR F1,D,E,A,B,C,2
	RR F1,B,C,D,E,A,4
	RR F1,E,A,B,C,D,6
	RR F1,C,D,E,A,B,8

	RR F1,A,B,C,D,E,10
	RR F1,D,E,A,B,C,12
	RR F1,B,C,D,E,A,14
	RR F1,E,A,B,C,D,16
	RR F1,C,D,E,A,B,18

	RR F2,A,B,C,D,E,20
	RR F2,D,E,A,B,C,22
	RR F2,B,C,D,E,A,24
	RR F2,E,A,B,C,D,26
	RR F2,C,D,E,A,B,28

	RR F2,A,B,C,D,E,30
	RR F2,D,E,A,B,C,32
	RR F2,B,C,D,E,A,34
	RR F2,E,A,B,C,D,36
	RR F2,C,D,E,A,B,38

	RR F3,A,B,C,D,E,40
	RR F3,D,E,A,B,C,42
	RR F3,B,C,D,E,A,44
	RR F3,E,A,B,C,D,46
	RR F3,C,D,E,A,B,48

	RR F3,A,B,C,D,E,50
	RR F3,D,E,A,B,C,52
	RR F3,B,C,D,E,A,54
	RR F3,E,A,B,C,D,56
	RR F3,C,D,E,A,B,58

	add $64, BUFFER_PTR # move to the next 64-byte block
	cmp BUFFER_END, BUFFER_PTR # if the current is the last one use
	cmovae K_BASE, BUFFER_PTR # dummy source to avoid buffer overrun

	RR F4,A,B,C,D,E,60
	RR F4,D,E,A,B,C,62
	RR F4,B,C,D,E,A,64
	RR F4,E,A,B,C,D,66
	RR F4,C,D,E,A,B,68

	RR F4,A,B,C,D,E,70
	RR F4,D,E,A,B,C,72
	RR F4,B,C,D,E,A,74
	RR F4,E,A,B,C,D,76
	RR F4,C,D,E,A,B,78

	UPDATE_HASH (HASH_PTR), A
	UPDATE_HASH 4(HASH_PTR), B
	UPDATE_HASH 8(HASH_PTR), C
	UPDATE_HASH 12(HASH_PTR), D
	UPDATE_HASH 16(HASH_PTR), E

	RESTORE_RENAMED_REGS
	cmp K_BASE, BUFFER_PTR # K_BASE means we reached the end
	jne 1b
.endm

.macro INIT_REGALLOC
	.set A, REG_A
	.set B, REG_B
	.set C, REG_C
	.set D, REG_D
	.set E, REG_E
	.set T1, REG_T1
	.set T2, REG_T2
.endm

.macro RESTORE_RENAMED_REGS
	# order is important (REG_C is where it should be)
	mov B, REG_B
	mov D, REG_D
	mov A, REG_A
	mov E, REG_E
.endm

.macro SWAP_REG_NAMES a, b
	.set _T, \a
	.set \a, \b
	.set \b, _T
.endm

.macro F1 b, c, d
	mov \c, T1
	SWAP_REG_NAMES \c, T1
	xor \d, T1
	and \b, T1
	xor \d, T1
.endm

.macro F2 b, c, d
	mov \d, T1
	SWAP_REG_NAMES \d, T1
	xor \c, T1
	xor \b, T1
.endm

.macro F3 b, c, d
	mov \c, T1
	SWAP_REG_NAMES \c, T1
	mov \b, T2
	or \b, T1
	and \c, T2
	and \d, T1
	or T2, T1
.endm

.macro F4 b, c, d
	F2 \b, \c, \d
.endm

.macro UPDATE_HASH hash, val
	add \hash, \val
	mov \val, \hash
.endm
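# e.g. UPDATE_HASH (HASH_PTR), A adds the previous digest[0] into A and
# writes the sum back to digest[0]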

/*
 * RR does two rounds of SHA-1 back to back with W[] pre-calc
 *   t1 = F(b, c, d);   e += w(i)
 *   e += t1;           b <<= 30;   d += w(i+1);
 *   t1 = F(a, b, c);
 *   d += t1;           a <<= 5;
 *   e += a;
 *   t1 = e;            a >>= 7;
 *   t1 <<= 5;
 *   d += t1;
 */
.macro RR F, a, b, c, d, e, round
	add WK(\round), \e
	\F \b, \c, \d # t1 = F(b, c, d);
	W_PRECALC (\round + W_PRECALC_AHEAD)
	rol $30, \b
	add T1, \e
	add WK(\round + 1), \d

	\F \a, \b, \c
	W_PRECALC (\round + W_PRECALC_AHEAD + 1)
	rol $5, \a
	add \a, \e
	add T1, \d
	ror $7, \a # (a <<r 5) >>r 7 => a <<r 30

	mov \e, T1
	SWAP_REG_NAMES \e, T1

	rol $5, T1
	add T1, \d

	# write: \a, \b
	# rotate: \a<=\d, \b<=\e, \c<=\a, \d<=\b, \e<=\c
.endm
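# Example: RR F1,A,B,C,D,E,0 executes rounds 0 and 1; per the rotation note
# above, the caller passes the renamed order D,E,A,B,C for rounds 2 and 3.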

.macro W_PRECALC r
	.set i, \r

	.if (i < 20)
		.set K_XMM, 0
	.elseif (i < 40)
		.set K_XMM, 16
	.elseif (i < 60)
		.set K_XMM, 32
	.elseif (i < 80)
		.set K_XMM, 48
	.endif

	.if ((i < 16) || ((i >= 80) && (i < (80 + W_PRECALC_AHEAD))))
		.set i, ((\r) % 80) # pre-compute for the next iteration
		.if (i == 0)
			W_PRECALC_RESET
		.endif
		W_PRECALC_00_15
	.elseif (i < 32)
		W_PRECALC_16_31
	.elseif (i < 80) // rounds 32-79
		W_PRECALC_32_79
	.endif
.endm
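/*
 * Round arguments of 80 and above wrap around to the next block: W_PRECALC 80
 * through 95, issued from rounds 64-79, pre-compute w[0..15]+K for the
 * following 64-byte block, reading from BUFFER_PTR which has already been
 * advanced (or redirected to the K table as a harmless dummy source on the
 * last block).
 */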

.macro W_PRECALC_RESET
	.set W, W0
	.set W_minus_04, W4
	.set W_minus_08, W8
	.set W_minus_12, W12
	.set W_minus_16, W16
	.set W_minus_20, W20
	.set W_minus_24, W24
	.set W_minus_28, W28
	.set W_minus_32, W
.endm

.macro W_PRECALC_ROTATE
	.set W_minus_32, W_minus_28
	.set W_minus_28, W_minus_24
	.set W_minus_24, W_minus_20
	.set W_minus_20, W_minus_16
	.set W_minus_16, W_minus_12
	.set W_minus_12, W_minus_08
	.set W_minus_08, W_minus_04
	.set W_minus_04, W
	.set W, W_minus_32
.endm
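/*
 * For example, after the four steps that finish one w[i..i+3] group, the
 * register currently named W becomes W_minus_04, every older alias shifts
 * down by one slot, and the register that held W_minus_32 (no longer needed)
 * is recycled as the new W for the next group.
 */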

.macro W_PRECALC_SSSE3

.macro W_PRECALC_00_15
	W_PRECALC_00_15_SSSE3
.endm
.macro W_PRECALC_16_31
	W_PRECALC_16_31_SSSE3
.endm
.macro W_PRECALC_32_79
	W_PRECALC_32_79_SSSE3
.endm

/* message scheduling pre-compute for rounds 0-15 */
.macro W_PRECALC_00_15_SSSE3
	.if ((i & 3) == 0)
		movdqu (i*4)(BUFFER_PTR), W_TMP1
	.elseif ((i & 3) == 1)
		pshufb XMM_SHUFB_BSWAP, W_TMP1
		movdqa W_TMP1, W
	.elseif ((i & 3) == 2)
		paddd (K_BASE), W_TMP1
	.elseif ((i & 3) == 3)
		movdqa W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm

/* message scheduling pre-compute for rounds 16-31
 *
 * - the last 32 w[i] values are kept in 8 XMM registers
 * - the K+w[i] values are pre-calculated and stored to memory, to be loaded
 *   later by the scalar ALU add instructions
 *
 * rounds 16-31 need some "heavy-lifting" vectorization because of the
 * w[i]->w[i-3] dependency; this gets easier for rounds 32-79
 */
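/*
 * Reading of the code below: within one 4-wide group w[i..i+3], the top lane
 * (w[i+3]) needs w[i], which is produced by this very group. The xor is
 * therefore first computed with that term as zero (psrldq $4 leaves the top
 * lane of the w[i-3] vector empty); the pslldq $12 plus 2-bit rotate sequence
 * then injects the missing (w[i] rol 1) contribution into the top lane.
 */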
.macro W_PRECALC_16_31_SSSE3
	# blended scheduling of vector and scalar instruction streams, one 4-wide
	# vector iteration / 4 scalar rounds
	.if ((i & 3) == 0)
		movdqa W_minus_12, W
		palignr $8, W_minus_16, W # w[i-14]
		movdqa W_minus_04, W_TMP1
		psrldq $4, W_TMP1 # w[i-3]
		pxor W_minus_08, W
	.elseif ((i & 3) == 1)
		pxor W_minus_16, W_TMP1
		pxor W_TMP1, W
		movdqa W, W_TMP2
		movdqa W, W_TMP1
		pslldq $12, W_TMP2
	.elseif ((i & 3) == 2)
		psrld $31, W
		pslld $1, W_TMP1
		por W, W_TMP1
		movdqa W_TMP2, W
		psrld $30, W_TMP2
		pslld $2, W
	.elseif ((i & 3) == 3)
		pxor W, W_TMP1
		pxor W_TMP2, W_TMP1
		movdqa W_TMP1, W
		paddd K_XMM(K_BASE), W_TMP1
		movdqa W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm

/* message scheduling pre-compute for rounds 32-79
 *
 * in the SHA-1 specification: w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1
 * instead we use the equivalent: w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2
 * this allows more efficient vectorization since the w[i]=>w[i-3] dependency is broken
 */
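/*
 * The equivalence follows by expanding each term of the original recurrence
 * once more, e.g. w[i-3] = (w[i-6] ^ w[i-11] ^ w[i-17] ^ w[i-19]) rol 1, and
 * xor-ing the four expansions: the terms w[i-11], w[i-17], w[i-19], w[i-22],
 * w[i-24] and w[i-30] each appear twice and cancel, leaving
 * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 for i >= 32.
 */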
.macro W_PRECALC_32_79_SSSE3
	.if ((i & 3) == 0)
		movdqa W_minus_04, W_TMP1
		pxor W_minus_28, W # W is W_minus_32 before xor
		palignr $8, W_minus_08, W_TMP1
	.elseif ((i & 3) == 1)
		pxor W_minus_16, W
		pxor W_TMP1, W
		movdqa W, W_TMP1
	.elseif ((i & 3) == 2)
		psrld $30, W
		pslld $2, W_TMP1
		por W, W_TMP1
	.elseif ((i & 3) == 3)
		movdqa W_TMP1, W
		paddd K_XMM(K_BASE), W_TMP1
		movdqa W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm

.endm // W_PRECALC_SSSE3


#define K1 0x5a827999
#define K2 0x6ed9eba1
#define K3 0x8f1bbcdc
#define K4 0xca62c1d6

.section .rodata
.align 16

K_XMM_AR:
	.long K1, K1, K1, K1
	.long K2, K2, K2, K2
	.long K3, K3, K3, K3
	.long K4, K4, K4, K4

BSWAP_SHUFB_CTL:
	.long 0x00010203
	.long 0x04050607
	.long 0x08090a0b
	.long 0x0c0d0e0f
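# Used with (v)pshufb, this mask reverses the byte order within each 32-bit
# word, converting the big-endian message words into host byte order.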


.section .text

W_PRECALC_SSSE3
.macro xmm_mov a, b
	movdqu \a,\b
.endm

/*
 * SSSE3 optimized implementation:
 *
 * extern "C" void sha1_transform_ssse3(struct sha1_state *state,
 *					const u8 *data, int blocks);
 *
 * Note that struct sha1_state is assumed to begin with u32 state[5].
 */
SHA1_VECTOR_ASM sha1_transform_ssse3
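/*
 * Illustrative caller sketch (hypothetical variable names, not part of this
 * file), assuming only the layout noted above -- a context that begins with
 * u32 state[5] holding the standard SHA-1 initial values:
 *
 *	struct sha1_state st = { .state = { 0x67452301, 0xefcdab89,
 *					    0x98badcfe, 0x10325476,
 *					    0xc3d2e1f0 } };
 *	sha1_transform_ssse3(&st, data, nblocks);
 *
 * The function consumes exactly 'blocks' full 64-byte blocks; message
 * padding and CPU feature checks are the caller's responsibility.
 */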

.macro W_PRECALC_AVX

.purgem W_PRECALC_00_15
.macro W_PRECALC_00_15
	W_PRECALC_00_15_AVX
.endm
.purgem W_PRECALC_16_31
.macro W_PRECALC_16_31
	W_PRECALC_16_31_AVX
.endm
.purgem W_PRECALC_32_79
.macro W_PRECALC_32_79
	W_PRECALC_32_79_AVX
.endm

.macro W_PRECALC_00_15_AVX
	.if ((i & 3) == 0)
		vmovdqu (i*4)(BUFFER_PTR), W_TMP1
	.elseif ((i & 3) == 1)
		vpshufb XMM_SHUFB_BSWAP, W_TMP1, W
	.elseif ((i & 3) == 2)
		vpaddd (K_BASE), W, W_TMP1
	.elseif ((i & 3) == 3)
		vmovdqa W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm

.macro W_PRECALC_16_31_AVX
	.if ((i & 3) == 0)
		vpalignr $8, W_minus_16, W_minus_12, W # w[i-14]
		vpsrldq $4, W_minus_04, W_TMP1 # w[i-3]
		vpxor W_minus_08, W, W
		vpxor W_minus_16, W_TMP1, W_TMP1
	.elseif ((i & 3) == 1)
		vpxor W_TMP1, W, W
		vpslldq $12, W, W_TMP2
		vpslld $1, W, W_TMP1
	.elseif ((i & 3) == 2)
		vpsrld $31, W, W
		vpor W, W_TMP1, W_TMP1
		vpslld $2, W_TMP2, W
		vpsrld $30, W_TMP2, W_TMP2
	.elseif ((i & 3) == 3)
		vpxor W, W_TMP1, W_TMP1
		vpxor W_TMP2, W_TMP1, W
		vpaddd K_XMM(K_BASE), W, W_TMP1
		vmovdqu W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm

.macro W_PRECALC_32_79_AVX
	.if ((i & 3) == 0)
		vpalignr $8, W_minus_08, W_minus_04, W_TMP1
		vpxor W_minus_28, W, W # W is W_minus_32 before xor
	.elseif ((i & 3) == 1)
		vpxor W_minus_16, W_TMP1, W_TMP1
		vpxor W_TMP1, W, W
	.elseif ((i & 3) == 2)
		vpslld $2, W, W_TMP1
		vpsrld $30, W, W
		vpor W, W_TMP1, W
	.elseif ((i & 3) == 3)
		vpaddd K_XMM(K_BASE), W, W_TMP1
		vmovdqu W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm

.endm // W_PRECALC_AVX

W_PRECALC_AVX
.purgem xmm_mov
.macro xmm_mov a, b
	vmovdqu \a,\b
.endm


/*
 * AVX optimized implementation:
 *
 * extern "C" void sha1_transform_avx(struct sha1_state *state,
 *				      const u8 *data, int blocks);
 */
SHA1_VECTOR_ASM sha1_transform_avx