Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Glue Code for SSE2 assembler versions of Serpent Cipher
  4 *
  5 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
  6 *
  7 * Glue code based on aesni-intel_glue.c by:
  8 *  Copyright (C) 2008, Intel Corp.
  9 *    Author: Huang Ying <ying.huang@intel.com>
 10 *
 11 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 12 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 13 */
 14
 15#include <linux/module.h>
 
 16#include <linux/types.h>
 17#include <linux/crypto.h>
 18#include <linux/err.h>
 
 19#include <crypto/algapi.h>
 20#include <crypto/b128ops.h>
 21#include <crypto/internal/simd.h>
 22#include <crypto/serpent.h>
 
 
 
 
 
 
 
 23
 24#include "serpent-sse2.h"
 25#include "ecb_cbc_helpers.h"
 
 
 
 
 
 
 
 26
/* skcipher setkey: delegate to the generic Serpent key schedule. */
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen)
{
	return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
 32
 33static void serpent_decrypt_cbc_xway(const void *ctx, u8 *dst, const u8 *src)
 
 34{
 35	u8 buf[SERPENT_PARALLEL_BLOCKS - 1][SERPENT_BLOCK_SIZE];
 36	const u8 *s = src;
 
 
 
 
 
 
 
 
 37
 38	if (dst == src)
 39		s = memcpy(buf, src, sizeof(buf));
 40	serpent_dec_blk_xway(ctx, dst, src);
 41	crypto_xor(dst + SERPENT_BLOCK_SIZE, s, sizeof(buf));
 42}
 43
/* ECB encrypt: parallel SSE2 path first, then single-block fallback. */
static int ecb_encrypt(struct skcipher_request *req)
{
	ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
	ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_enc_blk_xway);
	ECB_BLOCK(1, __serpent_encrypt);
	ECB_WALK_END();
}
 51
/* ECB decrypt: parallel SSE2 path first, then single-block fallback. */
static int ecb_decrypt(struct skcipher_request *req)
{
	ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
	ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_dec_blk_xway);
	ECB_BLOCK(1, __serpent_decrypt);
	ECB_WALK_END();
}
 59
/* CBC encryption is inherently serial, so only the scalar cipher is used. */
static int cbc_encrypt(struct skcipher_request *req)
{
	CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
	CBC_ENC_BLOCK(__serpent_encrypt);
	CBC_WALK_END();
}
 66
/* CBC decryption parallelizes; use the x-way helper, then singles. */
static int cbc_decrypt(struct skcipher_request *req)
{
	CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
	CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_decrypt_cbc_xway);
	CBC_DEC_BLOCK(1, __serpent_decrypt);
	CBC_WALK_END();
}
 74
/*
 * Internal (CRYPTO_ALG_INTERNAL) skcipher implementations; they are
 * exposed to users through the SIMD wrappers registered at module init.
 */
static struct skcipher_alg serpent_algs[] = {
	{
		.base.cra_name		= "__ecb(serpent)",
		.base.cra_driver_name	= "__ecb-serpent-sse2",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= SERPENT_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct serpent_ctx),
		.base.cra_module	= THIS_MODULE,
		.min_keysize		= SERPENT_MIN_KEY_SIZE,
		.max_keysize		= SERPENT_MAX_KEY_SIZE,
		.setkey			= serpent_setkey_skcipher,
		.encrypt		= ecb_encrypt,
		.decrypt		= ecb_decrypt,
	}, {
		.base.cra_name		= "__cbc(serpent)",
		.base.cra_driver_name	= "__cbc-serpent-sse2",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= SERPENT_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct serpent_ctx),
		.base.cra_module	= THIS_MODULE,
		.min_keysize		= SERPENT_MIN_KEY_SIZE,
		.max_keysize		= SERPENT_MAX_KEY_SIZE,
		.ivsize			= SERPENT_BLOCK_SIZE,
		.setkey			= serpent_setkey_skcipher,
		.encrypt		= cbc_encrypt,
		.decrypt		= cbc_decrypt,
	},
};
105
/* SIMD wrappers created over the internal algorithms above. */
static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
static int __init serpent_sse2_init(void)
{
	/* The assembler implementation requires SSE2 (X86_FEATURE_XMM2). */
	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	/* Registers both the internal algs and their SIMD front ends. */
	return simd_register_skciphers_compat(serpent_algs,
					      ARRAY_SIZE(serpent_algs),
					      serpent_simd_algs);
}
119
static void __exit serpent_sse2_exit(void)
{
	simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
				  serpent_simd_algs);
}
125
/* Module entry points and metadata. */
module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("serpent");
v4.10.11
 
  1/*
  2 * Glue Code for SSE2 assembler versions of Serpent Cipher
  3 *
  4 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
  5 *
  6 * Glue code based on aesni-intel_glue.c by:
  7 *  Copyright (C) 2008, Intel Corp.
  8 *    Author: Huang Ying <ying.huang@intel.com>
  9 *
 10 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 11 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 12 * CTR part based on code (crypto/ctr.c) by:
 13 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 14 *
 15 * This program is free software; you can redistribute it and/or modify
 16 * it under the terms of the GNU General Public License as published by
 17 * the Free Software Foundation; either version 2 of the License, or
 18 * (at your option) any later version.
 19 *
 20 * This program is distributed in the hope that it will be useful,
 21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 23 * GNU General Public License for more details.
 24 *
 25 * You should have received a copy of the GNU General Public License
 26 * along with this program; if not, write to the Free Software
 27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 28 * USA
 29 *
 30 */
 31
 32#include <linux/module.h>
 33#include <linux/hardirq.h>
 34#include <linux/types.h>
 35#include <linux/crypto.h>
 36#include <linux/err.h>
 37#include <crypto/ablk_helper.h>
 38#include <crypto/algapi.h>
 
 
 39#include <crypto/serpent.h>
 40#include <crypto/cryptd.h>
 41#include <crypto/b128ops.h>
 42#include <crypto/ctr.h>
 43#include <crypto/lrw.h>
 44#include <crypto/xts.h>
 45#include <asm/crypto/serpent-sse2.h>
 46#include <asm/crypto/glue_helper.h>
 47
 48static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
 49{
 50	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
 51	unsigned int j;
 52
 53	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
 54		ivs[j] = src[j];
 55
 56	serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
 57
 58	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
 59		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
 60}
 61
 62static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 63{
 64	be128 ctrblk;
 65
 66	le128_to_be128(&ctrblk, iv);
 67	le128_inc(iv);
 68
 69	__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
 70	u128_xor(dst, src, (u128 *)&ctrblk);
 71}
 72
 73static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
 74				   le128 *iv)
 75{
 76	be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
 77	unsigned int i;
 78
 79	for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
 80		if (dst != src)
 81			dst[i] = src[i];
 82
 83		le128_to_be128(&ctrblks[i], iv);
 84		le128_inc(iv);
 85	}
 86
 87	serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
 
 
 
 88}
 89
/* ECB encryption dispatch table: x-way SSE2 path with scalar fallback. */
static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};
102
/* CTR dispatch table: x-way SSE2 path with single-block fallback. */
static const struct common_glue_ctx serpent_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
	} }
};
115
/* ECB decryption dispatch table: x-way SSE2 path with scalar fallback. */
static const struct common_glue_ctx serpent_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
	} }
};
128
/* CBC decryption dispatch table: x-way helper with scalar fallback. */
static const struct common_glue_ctx serpent_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
	} }
};
141
/* Blkcipher ECB encrypt: delegate to the common glue walker. */
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
}
147
/* Blkcipher ECB decrypt: delegate to the common glue walker. */
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
}
153
/* Blkcipher CBC encrypt: serial, so only the scalar cipher is passed. */
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
				     dst, src, nbytes);
}
160
/* Blkcipher CBC decrypt: delegate to the common glue walker. */
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
				       nbytes);
}
167
/* Blkcipher CTR: same routine handles encryption and decryption. */
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
}
173
/*
 * Possibly open an FPU section for this chunk; the decision is delegated
 * to glue_fpu_begin() based on how many bytes remain.  Returns the
 * updated fpu_enabled state, which the caller must track.
 */
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}
179
/* Close the FPU section opened by serpent_fpu_begin(), if any. */
static inline void serpent_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}
184
/* Per-request state threaded through the LRW/XTS crypt callbacks. */
struct crypt_priv {
	struct serpent_ctx *ctx;	/* expanded cipher key */
	bool fpu_enabled;		/* whether an FPU section is open */
};
189
190static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
191{
192	const unsigned int bsize = SERPENT_BLOCK_SIZE;
193	struct crypt_priv *ctx = priv;
194	int i;
195
196	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
197
198	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
199		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
200		return;
201	}
202
203	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
204		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
205}
206
207static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
208{
209	const unsigned int bsize = SERPENT_BLOCK_SIZE;
210	struct crypt_priv *ctx = priv;
211	int i;
212
213	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
214
215	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
216		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
217		return;
218	}
219
220	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
221		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
222}
223
/* LRW context: tweak multiplication table plus the expanded cipher key. */
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};
228
229static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
230			      unsigned int keylen)
231{
232	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
233	int err;
234
235	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
236							SERPENT_BLOCK_SIZE);
237	if (err)
238		return err;
239
240	return lrw_init_table(&ctx->lrw_table, key + keylen -
241						SERPENT_BLOCK_SIZE);
242}
243
244static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
245		       struct scatterlist *src, unsigned int nbytes)
246{
247	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
248	be128 buf[SERPENT_PARALLEL_BLOCKS];
249	struct crypt_priv crypt_ctx = {
250		.ctx = &ctx->serpent_ctx,
251		.fpu_enabled = false,
252	};
253	struct lrw_crypt_req req = {
254		.tbuf = buf,
255		.tbuflen = sizeof(buf),
256
257		.table_ctx = &ctx->lrw_table,
258		.crypt_ctx = &crypt_ctx,
259		.crypt_fn = encrypt_callback,
260	};
261	int ret;
262
263	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
264	ret = lrw_crypt(desc, dst, src, nbytes, &req);
265	serpent_fpu_end(crypt_ctx.fpu_enabled);
266
267	return ret;
268}
269
270static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
271		       struct scatterlist *src, unsigned int nbytes)
272{
273	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
274	be128 buf[SERPENT_PARALLEL_BLOCKS];
275	struct crypt_priv crypt_ctx = {
276		.ctx = &ctx->serpent_ctx,
277		.fpu_enabled = false,
278	};
279	struct lrw_crypt_req req = {
280		.tbuf = buf,
281		.tbuflen = sizeof(buf),
282
283		.table_ctx = &ctx->lrw_table,
284		.crypt_ctx = &crypt_ctx,
285		.crypt_fn = decrypt_callback,
286	};
287	int ret;
288
289	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
290	ret = lrw_crypt(desc, dst, src, nbytes, &req);
291	serpent_fpu_end(crypt_ctx.fpu_enabled);
292
293	return ret;
294}
295
/* Tfm destructor for the LRW variants: release the tweak table. */
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
302
/* XTS context: separate expanded keys for the tweak and data ciphers. */
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};
307
308static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
309			      unsigned int keylen)
310{
311	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
312	int err;
313
314	err = xts_check_key(tfm, key, keylen);
315	if (err)
316		return err;
317
318	/* first half of xts-key is for crypt */
319	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
320	if (err)
321		return err;
322
323	/* second half of xts-key is for tweak */
324	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
325}
326
327static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
328		       struct scatterlist *src, unsigned int nbytes)
329{
330	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
331	be128 buf[SERPENT_PARALLEL_BLOCKS];
332	struct crypt_priv crypt_ctx = {
333		.ctx = &ctx->crypt_ctx,
334		.fpu_enabled = false,
335	};
336	struct xts_crypt_req req = {
337		.tbuf = buf,
338		.tbuflen = sizeof(buf),
339
340		.tweak_ctx = &ctx->tweak_ctx,
341		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
342		.crypt_ctx = &crypt_ctx,
343		.crypt_fn = encrypt_callback,
344	};
345	int ret;
346
347	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
348	ret = xts_crypt(desc, dst, src, nbytes, &req);
349	serpent_fpu_end(crypt_ctx.fpu_enabled);
350
351	return ret;
352}
353
354static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
355		       struct scatterlist *src, unsigned int nbytes)
356{
357	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
358	be128 buf[SERPENT_PARALLEL_BLOCKS];
359	struct crypt_priv crypt_ctx = {
360		.ctx = &ctx->crypt_ctx,
361		.fpu_enabled = false,
362	};
363	struct xts_crypt_req req = {
364		.tbuf = buf,
365		.tbuflen = sizeof(buf),
366
367		.tweak_ctx = &ctx->tweak_ctx,
368		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
369		.crypt_ctx = &crypt_ctx,
370		.crypt_fn = decrypt_callback,
371	};
372	int ret;
373
374	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
375	ret = xts_crypt(desc, dst, src, nbytes, &req);
376	serpent_fpu_end(crypt_ctx.fpu_enabled);
377
378	return ret;
379}
380
/*
 * Ten algorithm registrations: five internal synchronous blkcipher
 * implementations (CRYPTO_ALG_INTERNAL, priority 0) that must run where
 * the FPU is usable, and five async ablkcipher front ends (priority 400)
 * that defer to them via the ablk helper.
 */
static struct crypto_alg serpent_algs[10] = { {
	/* Internal ECB implementation. */
	.cra_name		= "__ecb-serpent-sse2",
	.cra_driver_name	= "__driver-ecb-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	/* Internal CBC implementation. */
	.cra_name		= "__cbc-serpent-sse2",
	.cra_driver_name	= "__driver-cbc-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	/* Internal CTR implementation (stream mode: blocksize 1). */
	.cra_name		= "__ctr-serpent-sse2",
	.cra_driver_name	= "__driver-ctr-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	/* Internal LRW implementation (key carries an extra tweak block). */
	.cra_name		= "__lrw-serpent-sse2",
	.cra_driver_name	= "__driver-lrw-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= lrw_serpent_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	/* Internal XTS implementation (double-length key). */
	.cra_name		= "__xts-serpent-sse2",
	.cra_driver_name	= "__driver-xts-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= xts_serpent_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	/* Async ECB front end over the internal implementation. */
	.cra_name		= "ecb(serpent)",
	.cra_driver_name	= "ecb-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	/* Async CBC front end. */
	.cra_name		= "cbc(serpent)",
	.cra_driver_name	= "cbc-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	/* Async CTR front end; CTR decryption equals encryption. */
	.cra_name		= "ctr(serpent)",
	.cra_driver_name	= "ctr-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	/* Async LRW front end. */
	.cra_name		= "lrw(serpent)",
	.cra_driver_name	= "lrw-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	/* Async XTS front end. */
	.cra_name		= "xts(serpent)",
	.cra_driver_name	= "xts-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };
600
static int __init serpent_sse2_init(void)
{
	/* The assembler implementation requires SSE2 (X86_FEATURE_XMM2). */
	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}
610
static void __exit serpent_sse2_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}
615
/* Module entry points and metadata. */
module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("serpent");