v3.15
/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)
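/*
 * These offsets match struct cast6_ctx (include/crypto/cast6.h):
 * Km is 12 rounds x 4 u32 masking keys at offset 0, and Kr is
 * 12 x 4 five-bit rotation keys, one per byte, at offset
 * 12*4*4 = 192.
 */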

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


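/*
 * lookup_32bit does the four s-box lookups for one 32-bit word of the
 * round function. src is a general-purpose register holding two packed
 * 32-bit words (loaded by F_head); the bh/bl sub-registers extract two
 * byte indices at a time, op1..op3 combine the s-box values, and
 * interleave_op lets the caller overlap the shift to the next word
 * with the table loads.
 */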
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

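/*
 * F_head computes I = (a op0 Km) rotated left by the current rotation
 * key for all four lanes at once; the rotate is a left shift by RKRF
 * OR'd with a right shift by RKRR = 32 - RKRF. The two 64-bit halves
 * of the result are then moved to general-purpose registers gi1/gi2
 * for the scalar s-box lookups in F_tail.
 */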
#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x,    RTMP;              \
	vpsrld	RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vmovq		x,    gi1;               \
	vpextrq $1,	x,    gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);              \
	F_head(b2, RX, RGI3, RGI4, op0);              \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
	\
	vpxor		a1, RX,   a1;                 \
	vpxor		a2, RTMP, a2;

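/*
 * The three CAST-256 round function types (RFC 2612), two blocks at a time:
 *   F1: I = (Km + D) <<< Kr,  f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 *   F2: I = (Km ^ D) <<< Kr,  f = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]
 *   F3: I = (Km - D) <<< Kr,  f = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]
 * op0 is the vector Km mix; op1..op3 are the scalar s-box combiners.
 */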
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

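/*
 * get_round_keys broadcasts the 32-bit masking key Km[nn] into RKM,
 * isolates the low five bits of the next rotation byte into RKRF,
 * derives the complementary shift count RKRR = 32 - RKRF, and shifts
 * RKR down a byte so the next round sees its own rotation key.
 */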
#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM;        \
	vpand		R1ST,               RKR,  RKRF; \
	vpsubq		RKRF,               R32,  RKRR; \
	vpsrldq $1,	RKR,                RKR;

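/*
 * Q is the forward quad-round of RFC 2612 (C ^= f1(D); B ^= f2(C);
 * A ^= f3(B); D ^= f1(A)); QBAR applies the same operations in
 * reverse order. Each qop updates both four-block groups at once.
 * Encryption runs Q(0)..Q(5) followed by QBAR(6)..QBAR(11), and
 * decryption walks the rounds back the other way.
 */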
#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);

#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);

#define shuffle(mask) \
	vpshufb		mask,            RKR, RKR;

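/*
 * preload_rkr pulls in the 16 rotation bytes used by the next four
 * quad-rounds. XORing each 5-bit Kr value with 16 adds 16 (mod 32)
 * to every rotation; that extra 16-bit rotation puts the s1/s2 input
 * bytes into the low half of each word, matching the order in which
 * lookup_32bit extracts them. do_mask (shuffle or dummy) pre-arranges
 * the Kr bytes to match the Q/QBAR order of the next four rounds.
 */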
#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),           RKR, RKR; \
	do_mask(mask);

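/*
 * transpose_4x4: a standard unpack-based transpose of a 4x4 matrix of
 * 32-bit words, turning four registers that hold one block each into
 * four registers that each hold the same word of all four blocks.
 */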
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;

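/*
 * inpack_blocks/outunpack_blocks byte-swap each word to the big-endian
 * order CAST-256 is specified in, and transpose into (or back out of)
 * the word-sliced register layout the round macros operate on.
 */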
#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask, x0,	x0; \
	vpshufb rmask, x1,	x1; \
	vpshufb rmask, x2,	x2; \
	vpshufb rmask, x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,		x0, x0;       \
	vpshufb rmask,		x1, x1;       \
	vpshufb rmask,		x2, x2;       \
	vpshufb rmask,		x3, x3;

.data

.align 16
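/*
 * Constant pool: the bswap masks switch 32-bit words (or the whole
 * 128-bit counter) between memory and big-endian order; the .Lrkr_*
 * masks reorder rotation-key bytes in preload_rkr for the Q/QBAR
 * mix of the next four rounds; .L16_mask, .L32_mask and .Lfirst_mask
 * feed the rotation set-up in preload_rkr and get_round_keys; and
 * 0x87 in .Lxts_gf128mul_and_shl1_mask is the low byte of the
 * GF(2^128) reduction polynomial used when doubling the XTS tweak.
 */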
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 8
__cast6_enc_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;
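	/*
	 * RID1 and RGI4 alias %rbp and %rbx, which are callee-saved in
	 * the x86_64 ABI, hence the saves above and matching pops below.
	 */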

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_enc_blk8)

.align 8
__cast6_dec_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_dec_blk8)

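/*
 * The ENTRY points below are called from C glue code (see
 * arch/x86/crypto/cast6_avx_glue.c). Their C-side declarations look
 * roughly like the following sketch; the struct cast6_ctx and le128
 * types are the ones used by the generic CAST-256 and XTS code:
 *
 *	asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst,
 *					   const u8 *src);
 *	asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst,
 *					   const u8 *src);
 *	asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst,
 *					   const u8 *src);
 *	asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst,
 *				       const u8 *src, le128 *iv);
 *	asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
 *					   const u8 *src, le128 *iv);
 *	asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
 *					   const u8 *src, le128 *iv);
 */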
ENTRY(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX, RKR, RKM);

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	ret;
ENDPROC(cast6_ctr_8way)

ENTRY(cast6_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	ret;
ENDPROC(cast6_xts_enc_8way)

ENTRY(cast6_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	ret;
ENDPROC(cast6_xts_dec_8way)
v4.6
/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

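/*
 * Note: this v4.6 revision is identical to the v3.15 listing above
 * except for the <asm/frame.h> include and the FRAME_BEGIN/FRAME_END
 * pairs in the ENTRY functions below; see the v3.15 listing for the
 * per-macro commentary.
 */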
.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x,    RTMP;              \
	vpsrld	RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vmovq		x,    gi1;               \
	vpextrq $1,	x,    gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);              \
	F_head(b2, RX, RGI3, RGI4, op0);              \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
	\
	vpxor		a1, RX,   a1;                 \
	vpxor		a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM;        \
	vpand		R1ST,               RKR,  RKRF; \
	vpsubq		RKRF,               R32,  RKRR; \
	vpsrldq $1,	RKR,                RKR;

#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);

#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);

#define shuffle(mask) \
	vpshufb		mask,            RKR, RKR;

#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),           RKR, RKR; \
	do_mask(mask);

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;

#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask, x0,	x0; \
	vpshufb rmask, x1,	x1; \
	vpshufb rmask, x2,	x2; \
	vpshufb rmask, x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,		x0, x0;       \
	vpshufb rmask,		x1, x1;       \
	vpshufb rmask,		x2, x2;       \
	vpshufb rmask,		x3, x3;

.data

.align 16
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 8
__cast6_enc_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_enc_blk8)

.align 8
__cast6_dec_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_dec_blk8)

ENTRY(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
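	/*
	 * FRAME_BEGIN/FRAME_END (from asm/frame.h) expand to frame-pointer
	 * set-up and tear-down when CONFIG_FRAME_POINTER is enabled (and
	 * to nothing otherwise), giving the stack unwinder a valid frame
	 * across the inner call.
	 */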

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX, RKR, RKM);

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast6_ctr_8way)

ENTRY(cast6_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_xts_enc_8way)

ENTRY(cast6_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_xts_dec_8way)