/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Twofish Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "twofish-avx-x86_64-asm_64.S"

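/*
 * Constants for the helper macros in glue_helper-asm-avx.S:
 * .Lbswap128_mask is a vpshufb mask that byte-reverses a 128-bit lane,
 * converting the big-endian CTR counter to the little-endian form the
 * vector adds expect; .Lxts_gf128mul_and_shl1_mask carries the carry
 * multiplier and the 0x87 reduction byte used by the multiply-by-alpha
 * step of the XTS tweak update in GF(2^128).
 */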
.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0

.text

/* structure of crypto context */
#define s0	0
#define s1	1024
#define s2	2048
#define s3	3072
#define w	4096
#define k	4128
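
/*
 * These offsets match struct twofish_ctx: four key-dependent 1 KiB
 * lookup tables s[4][256] (S-boxes and MDS matrix precombined), the
 * eight whitening subkey words at w, and the 32 round subkey words at k.
 */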

/**********************************************************************
  8-way AVX twofish
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX0 %xmm8
#define RY0 %xmm9

#define RX1 %xmm10
#define RY1 %xmm11

#define RK1 %xmm12
#define RK2 %xmm13

#define RT %xmm14
#define RR %xmm15

#define RID1  %r13
#define RID1d %r13d
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RGS1  %r8
#define RGS1d %r8d
#define RGS2  %r9
#define RGS2d %r9d
#define RGS3  %r10
#define RGS3d %r10d

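/*
 * The RGI* GPRs are chosen so their bl/bh subregisters are addressable
 * for byte extraction; RID1/RID2 hold the zero-extended table indices.
 *
 * lookup_32bit folds the low 32-bit word of src through the four tables,
 * roughly (C pseudocode):
 *
 *	dst = t0[src & 0xff] ^ t1[(src >> 8) & 0xff] ^
 *	      t2[(src >> 16) & 0xff] ^ t3[(src >> 24) & 0xff];
 *
 * Two words are packed per GPR; interleave_op (shr_next or dummy) shifts
 * il_reg a further 16 bits so a following invocation sees the high word.
 */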
#define lookup_32bit(t0, t1, t2, t3, src, dst, interleave_op, il_reg) \
	movzbl src ## bl, RID1d; \
	movzbl src ## bh, RID2d; \
	shrq $16, src; \
	movl t0(CTX, RID1, 4), dst ## d; \
	movl t1(CTX, RID2, 4), RID2d; \
	movzbl src ## bl, RID1d; \
	xorl RID2d, dst ## d; \
	movzbl src ## bh, RID2d; \
	interleave_op(il_reg); \
	xorl t2(CTX, RID1, 4), dst ## d; \
	xorl t3(CTX, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16, reg;

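/*
 * G runs the Twofish g() function over four 32-bit words at once: gi1
 * and gi2 each carry two packed words. The four lookup results are
 * repacked into RGS2 (both words from gi1) and RGS3 (both words from
 * gi2), ready for reinsertion into an xmm register.
 */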
#define G(gi1, gi2, x, t0, t1, t2, t3) \
	lookup_32bit(t0, t1, t2, t3, ##gi1, RGS1, shr_next, ##gi1); \
	lookup_32bit(t0, t1, t2, t3, ##gi2, RGS3, shr_next, ##gi2); \
	\
	lookup_32bit(t0, t1, t2, t3, ##gi1, RGS2, dummy, none); \
	shlq $32, RGS2; \
	orq RGS1, RGS2; \
	lookup_32bit(t0, t1, t2, t3, ##gi2, RGS1, dummy, none); \
	shlq $32, RGS1; \
	orq RGS1, RGS3;

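/*
 * Round head for both four-block groups: X = g(A) and Y = g(rol32(B, 8)).
 * The byte rotation of B is folded into the lookups by rotating the
 * table order to (s1, s2, s3, s0). Extraction of the next group's words
 * into GPRs is interleaved with the scalar table lookups to hide latency.
 */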
#define round_head_2(a, b, x1, y1, x2, y2) \
	vmovq b ## 1, RGI3; \
	vpextrq $1, b ## 1, RGI4; \
	\
	G(RGI1, RGI2, x1, s0, s1, s2, s3); \
	vmovq a ## 2, RGI1; \
	vpextrq $1, a ## 2, RGI2; \
	vmovq RGS2, x1; \
	vpinsrq $1, RGS3, x1, x1; \
	\
	G(RGI3, RGI4, y1, s1, s2, s3, s0); \
	vmovq b ## 2, RGI3; \
	vpextrq $1, b ## 2, RGI4; \
	vmovq RGS2, y1; \
	vpinsrq $1, RGS3, y1, y1; \
	\
	G(RGI1, RGI2, x2, s0, s1, s2, s3); \
	vmovq RGS2, x2; \
	vpinsrq $1, RGS3, x2, x2; \
	\
	G(RGI3, RGI4, y2, s1, s2, s3, s0); \
	vmovq RGS2, y2; \
	vpinsrq $1, RGS3, y2, y2;

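/*
 * Round tails apply the pseudo-Hadamard transform plus subkey addition
 * and mix the result into the other half of the state; for encryption,
 * roughly (C pseudocode):
 *
 *	c = ror32(c ^ (x + y + rk1), 1);
 *	d = rol32(d, 1) ^ (x + 2*y + rk2);
 *
 * The rotate-left of d is hoisted out as prerotate and applied one round
 * early (to b, which becomes the next round's d) so it overlaps with
 * independent work; decryption mirrors this with the rotates swapped.
 */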
#define encround_tail(a, b, c, d, x, y, prerotate) \
	vpaddd x, y, x; \
	vpaddd x, RK1, RT;\
	prerotate(b); \
	vpxor RT, c, c; \
	vpaddd y, x, y; \
	vpaddd y, RK2, y; \
	vpsrld $1, c, RT; \
	vpslld $(32 - 1), c, c; \
	vpor c, RT, c; \
	vpxor d, y, d; \

#define decround_tail(a, b, c, d, x, y, prerotate) \
	vpaddd x, y, x; \
	vpaddd x, RK1, RT;\
	prerotate(a); \
	vpxor RT, c, c; \
	vpaddd y, x, y; \
	vpaddd y, RK2, y; \
	vpxor d, y, d; \
	vpsrld $1, d, y; \
	vpslld $(32 - 1), d, d; \
	vpor d, y, d; \

#define rotate_1l(x) \
	vpslld $1, x, RR; \
	vpsrld $(32 - 1), x, x; \
	vpor x, RR, x;

#define preload_rgi(c) \
	vmovq c, RGI1; \
	vpextrq $1, c, RGI2;

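/*
 * A full round: broadcast the two round subkeys k[2n] and k[2n+1] to all
 * lanes, run the shared head, then the per-group tails. preload primes
 * RGI1/RGI2 with the next round's first-group a words while the vector
 * arithmetic of the current round is still in flight.
 */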
#define encrypt_round(n, a, b, c, d, preload, prerotate) \
	vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
	vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
	round_head_2(a, b, RX0, RY0, RX1, RY1); \
	encround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
	preload(c ## 1); \
	encround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define decrypt_round(n, a, b, c, d, preload, prerotate) \
	vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
	vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
	round_head_2(a, b, RX0, RY0, RX1, RY1); \
	decround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
	preload(c ## 1); \
	decround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define encrypt_cycle(n) \
	encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
	encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l);

#define encrypt_cycle_last(n) \
	encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
	encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy);

#define decrypt_cycle(n) \
	decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
	decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l);

#define decrypt_cycle_last(n) \
	decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
	decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy);

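/*
 * 4x4 dword matrix transpose: converts four blocks held one per register
 * (row form) into one same-position word of each block per register
 * (column form), so every vector op handles the same word of four blocks.
 */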
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x3; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1; \
	vpunpcklqdq x3, t2, x2; \
	vpunpckhqdq x3, t2, x3;

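/*
 * inpack_blocks XORs four whitening words (one per 32-bit lane of wkey)
 * into each block while the blocks are still in row form, then
 * transposes to column form; outunpack_blocks is the inverse.
 */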
#define inpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \
	vpxor x0, wkey, x0; \
	vpxor x1, wkey, x1; \
	vpxor x2, wkey, x2; \
	vpxor x3, wkey, x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpxor x0, wkey, x0; \
	vpxor x1, wkey, x1; \
	vpxor x2, wkey, x2; \
	vpxor x3, wkey, x3;

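/*
 * The eight blocks are processed as two groups of four so that scalar
 * table lookups for one group overlap with vector arithmetic for the
 * other, keeping both the integer and SIMD pipelines busy.
 */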
SYM_FUNC_START_LOCAL(__twofish_enc_blk8)
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
	 */

	vmovdqu w(CTX), RK1;

	pushq %r13;
	pushq %rbx;
	pushq %rcx;

	inpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
	preload_rgi(RA1);
	rotate_1l(RD1);
	inpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
	rotate_1l(RD2);

	encrypt_cycle(0);
	encrypt_cycle(1);
	encrypt_cycle(2);
	encrypt_cycle(3);
	encrypt_cycle(4);
	encrypt_cycle(5);
	encrypt_cycle(6);
	encrypt_cycle_last(7);

	vmovdqu (w+4*4)(CTX), RK1;

	popq %rcx;
	popq %rbx;
	popq %r13;

	outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
	outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);

	RET;
SYM_FUNC_END(__twofish_enc_blk8)

SYM_FUNC_START_LOCAL(__twofish_dec_blk8)
	/* input:
	 *	%rdi: ctx, CTX
	 *	RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	vmovdqu (w+4*4)(CTX), RK1;

	pushq %r13;
	pushq %rbx;

	inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
	preload_rgi(RC1);
	rotate_1l(RA1);
	inpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
	rotate_1l(RA2);

	decrypt_cycle(7);
	decrypt_cycle(6);
	decrypt_cycle(5);
	decrypt_cycle(4);
	decrypt_cycle(3);
	decrypt_cycle(2);
	decrypt_cycle(1);
	decrypt_cycle_last(0);

	vmovdqu (w)(CTX), RK1;

	popq %rbx;
	popq %r13;

	outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
	outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);

	RET;
SYM_FUNC_END(__twofish_dec_blk8)

SYM_FUNC_START(twofish_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __twofish_enc_blk8;

	store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	FRAME_END
	RET;
SYM_FUNC_END(twofish_ecb_enc_8way)

SYM_FUNC_START(twofish_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	call __twofish_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	RET;
SYM_FUNC_END(twofish_ecb_dec_8way)

SYM_FUNC_START(twofish_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	call __twofish_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	RET;
SYM_FUNC_END(twofish_cbc_dec_8way)
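
/*
 * CTR and XTS entry points; load_ctr_8way/store_ctr_8way and
 * load_xts_8way/store_xts_8way are provided by glue_helper-asm-avx.S and
 * use the byte-swap and GF(2^128) masks defined in .rodata above.
 */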

SYM_FUNC_START(twofish_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX0, RX1, RY0);

	call __twofish_enc_blk8;

	store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	popq %r12;

	FRAME_END
	RET;
SYM_FUNC_END(twofish_ctr_8way)

SYM_FUNC_START(twofish_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);

	call __twofish_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	FRAME_END
	RET;
SYM_FUNC_END(twofish_xts_enc_8way)

SYM_FUNC_START(twofish_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2,
		      RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);

	call __twofish_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	RET;
SYM_FUNC_END(twofish_xts_dec_8way)