/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)
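/*
 * i.e. the 12 quad-rounds' 4 x 32-bit masking keys (Km) come first, and the
 * 12 x 4 single-byte rotation keys (Kr) follow at byte offset 12*4*4 = 192;
 * this matches the layout of the kernel's struct cast6_ctx.
 */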

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %r15

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rdi
#define RID1d %edi
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl	src ## bh,	RID1d; \
	movzbl	src ## bl,	RID2d; \
	shrq	$16,		src; \
	movl	s1(, RID1, 4),	dst ## d; \
	op1	s2(, RID2, 4),	dst ## d; \
	movzbl	src ## bh,	RID1d; \
	movzbl	src ## bl,	RID2d; \
	interleave_op(il_reg); \
	op2	s3(, RID1, 4),	dst ## d; \
	op3	s4(, RID2, 4),	dst ## d;
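
/*
 * lookup_32bit splits the 32-bit word at the bottom of 'src' into bytes
 * via the bl/bh subregisters: bytes 1:0 index s1/s2 first, then shrq $16
 * exposes bytes 3:2 for s3/s4.  'op1'..'op3' combine the table values
 * (xor/add/sub, chosen by the round function).  interleave_op(il_reg) is
 * a hook (dummy or shr_next) that shifts 'src' a third time, so the upper
 * 32-bit word packed into the same 64-bit GPR is already in place when
 * the next lookup_32bit on that register starts.
 */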

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq	$16,	reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x; \
	vpslld	RKRF,	x,    RTMP; \
	vpsrld	RKRR,	x,    x; \
	vpor	RTMP,	x,    x; \
	\
	vmovq		x,    gi1; \
	vpextrq $1,	x,    gi2;
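
/*
 * F_head computes I = (Km op0 block) <<< Kr for four blocks at once.  AVX
 * has no variable 32-bit rotate, so it is emulated as
 * (x << RKRF) | (x >> RKRR) with RKRR = 32 - RKRF (set up by
 * get_round_keys below).  vmovq/vpextrq then hand the four rotated words
 * to GPRs for the scalar s-box lookups.
 */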

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32,	RFS2; \
	orq		RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32,	RFS1; \
	orq		RFS1, RFS3; \
	\
	vmovq		RFS2, x; \
	vpinsrq $1,	RFS3, x, x;
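
/*
 * F_tail runs the s-box pass on the four words extracted by F_head: each
 * 64-bit GPR holds two 32-bit words, so two lookup_32bit passes per
 * register, with shlq/orq merging the two 32-bit results back into one
 * qword and vmovq/vpinsrq rebuilding the 128-bit vector.
 */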

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor	a1, RX,   a1; \
	vpxor	a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
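
/*
 * The op0..op3 selections above are the three CAST-256 round functions
 * from RFC 2612, each applied to 2x4 blocks (a ^= f(b)):
 *   f1: I = (Km + D) <<< Kr,  f = ((s1[Ia] ^ s2[Ib]) - s3[Ic]) + s4[Id]
 *   f2: I = (Km ^ D) <<< Kr,  f = ((s1[Ia] - s2[Ib]) + s3[Ic]) ^ s4[Id]
 *   f3: I = (Km - D) <<< Kr,  f = ((s1[Ia] + s2[Ib]) ^ s3[Ic]) - s4[Id]
 */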

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM; \
	vpand		R1ST,		    RKR, RKRF; \
	vpsubq		RKRF,		    R32, RKRR; \
	vpsrldq $1,	RKR,		    RKR;
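
/*
 * get_round_keys broadcasts the 32-bit masking key of subround nn into
 * all four lanes of RKM.  RKR holds up to 16 pending rotation bytes;
 * R1ST (0x1f) masks the current byte down to a 5-bit left-rotate count
 * (RKRF), RKRR = 32 - RKRF is the matching right-shift count, and
 * vpsrldq $1 discards the consumed byte so the next subround's count sits
 * at the bottom.
 */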

#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1); \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2); \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3); \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);

#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1); \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3); \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2); \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);
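
/*
 * Q(n) is the RFC 2612 forward quad-round n:
 *   C ^= f1(D); B ^= f2(C); A ^= f3(B); D ^= f1(A);
 * QBAR(n) is the reverse quad-round: the same operations in reverse
 * order.  CAST-256 is 6 forward quad-rounds followed by 6 reverse ones.
 */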

#define shuffle(mask) \
	vpshufb		mask,	RKR, RKR;

#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,		  RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),		  RKR, RKR; \
	do_mask(mask);
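
/*
 * XOR with 0x10 toggles bit 4 of each 5-bit rotate count, i.e. adds 16
 * mod 32.  The extra 16-bit rotation is needed because lookup_32bit
 * consumes the low half of each word first (bh:bl are bytes 1:0) while
 * RFC 2612 indexes s1/s2 with the top two bytes of I; pre-rotating I by
 * another 16 bits puts every byte where the lookup expects it.  do_mask
 * (dummy or shuffle) optionally reorders the 16 rotation bytes to match
 * the Q/QBAR consumption order.
 */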

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;
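
/*
 * Standard 4x4 32-bit matrix transpose (unpack dwords, then qwords).
 * Four 128-bit blocks enter as x0..x3; afterwards x0 holds word 0 of all
 * four blocks, x1 word 1, and so on, so each vector instruction works on
 * the same word of four blocks in parallel.
 */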

#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	vpshufb rmask,	x2,	x2; \
	vpshufb rmask,	x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	vpshufb rmask,	x2,	x2; \
	vpshufb rmask,	x3,	x3;
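
/*
 * rmask is .Lbswap_mask in practice: CAST-256 is defined on big-endian
 * 32-bit words, so each dword is byte-swapped on the way in and swapped
 * back after the inverse transpose on the way out.
 */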

.section	.rodata.cst16, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
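
/*
 * The mask above is used by load_xts_8way to step the XTS tweak:
 * multiplying by α (x) in GF(2¹²⁸) is a 128-bit shift left by one, with
 * the reduction polynomial byte 0x87 folded back in on carry.
 */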
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.L16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16

.section	.rodata.cst4.L32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0

.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 8
SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
	/* input:
	 *	%rdi: ctx
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
SYM_FUNC_END(__cast6_enc_blk8)

.align 8
SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
	/* input:
	 *	%rdi: ctx
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;
	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
SYM_FUNC_END(__cast6_dec_blk8)

SYM_FUNC_START(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
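	/*
	 * dst is parked in %r11 here and in the entry points below: the
	 * blk8 helpers clobber %rdi, %rsi and %rdx (CTX moves to %r15;
	 * %rdi/%rsi double as the s-box index registers RID1/RID2).
	 */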

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
SYM_FUNC_END(cast6_ecb_enc_8way)

SYM_FUNC_START(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
SYM_FUNC_END(cast6_ecb_dec_8way)

SYM_FUNC_START(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	popq %r12;
	FRAME_END
	ret;
SYM_FUNC_END(cast6_cbc_dec_8way)

SYM_FUNC_START(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX, RKR, RKM);

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	popq %r12;
	FRAME_END
	ret;
SYM_FUNC_END(cast6_ctr_8way)

SYM_FUNC_START(cast6_xts_enc_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊗ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
SYM_FUNC_END(cast6_xts_enc_8way)

SYM_FUNC_START(cast6_xts_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊗ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
SYM_FUNC_END(cast6_xts_dec_8way)