v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Glue Code for SSE2 assembler versions of Serpent Cipher
  4 *
  5 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
  6 *
  7 * Glue code based on aesni-intel_glue.c by:
  8 *  Copyright (C) 2008, Intel Corp.
  9 *    Author: Huang Ying <ying.huang@intel.com>
 10 *
 11 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 12 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 13 */
 14
 15#include <linux/module.h>
 16#include <linux/types.h>
 17#include <linux/crypto.h>
 18#include <linux/err.h>
 19#include <crypto/algapi.h>
 20#include <crypto/b128ops.h>
 21#include <crypto/internal/simd.h>
 22#include <crypto/serpent.h>
 23
 24#include "serpent-sse2.h"
 25#include "ecb_cbc_helpers.h"
 26
 27static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
 28				   const u8 *key, unsigned int keylen)
 29{
 30	return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
 31}
 32
 33static void serpent_decrypt_cbc_xway(const void *ctx, u8 *dst, const u8 *src)
 34{
 35	u8 buf[SERPENT_PARALLEL_BLOCKS - 1][SERPENT_BLOCK_SIZE];
 36	const u8 *s = src;
 37
 38	if (dst == src)
 39		s = memcpy(buf, src, sizeof(buf));
 40	serpent_dec_blk_xway(ctx, dst, src);
 41	crypto_xor(dst + SERPENT_BLOCK_SIZE, s, sizeof(buf));
 42}
 43
 44static int ecb_encrypt(struct skcipher_request *req)
 45{
 46	ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
 47	ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_enc_blk_xway);
 48	ECB_BLOCK(1, __serpent_encrypt);
 49	ECB_WALK_END();
 50}
 51
 52static int ecb_decrypt(struct skcipher_request *req)
 53{
 54	ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
 55	ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_dec_blk_xway);
 56	ECB_BLOCK(1, __serpent_decrypt);
 57	ECB_WALK_END();
 58}
 59
 60static int cbc_encrypt(struct skcipher_request *req)
 61{
 62	CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
 63	CBC_ENC_BLOCK(__serpent_encrypt);
 64	CBC_WALK_END();
 65}
 66
 67static int cbc_decrypt(struct skcipher_request *req)
 68{
 69	CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
 70	CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_decrypt_cbc_xway);
 71	CBC_DEC_BLOCK(1, __serpent_decrypt);
 72	CBC_WALK_END();
 73}
 74
 75static struct skcipher_alg serpent_algs[] = {
 76	{
 77		.base.cra_name		= "__ecb(serpent)",
 78		.base.cra_driver_name	= "__ecb-serpent-sse2",
 79		.base.cra_priority	= 400,
 80		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
 81		.base.cra_blocksize	= SERPENT_BLOCK_SIZE,
 82		.base.cra_ctxsize	= sizeof(struct serpent_ctx),
 83		.base.cra_module	= THIS_MODULE,
 84		.min_keysize		= SERPENT_MIN_KEY_SIZE,
 85		.max_keysize		= SERPENT_MAX_KEY_SIZE,
 86		.setkey			= serpent_setkey_skcipher,
 87		.encrypt		= ecb_encrypt,
 88		.decrypt		= ecb_decrypt,
 89	}, {
 90		.base.cra_name		= "__cbc(serpent)",
 91		.base.cra_driver_name	= "__cbc-serpent-sse2",
 92		.base.cra_priority	= 400,
 93		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
 94		.base.cra_blocksize	= SERPENT_BLOCK_SIZE,
 95		.base.cra_ctxsize	= sizeof(struct serpent_ctx),
 96		.base.cra_module	= THIS_MODULE,
 97		.min_keysize		= SERPENT_MIN_KEY_SIZE,
 98		.max_keysize		= SERPENT_MAX_KEY_SIZE,
 99		.ivsize			= SERPENT_BLOCK_SIZE,
100		.setkey			= serpent_setkey_skcipher,
101		.encrypt		= cbc_encrypt,
102		.decrypt		= cbc_decrypt,
103	},
104};
105
106static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
107
108static int __init serpent_sse2_init(void)
109{
110	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
111		printk(KERN_INFO "SSE2 instructions are not detected.\n");
112		return -ENODEV;
113	}
114
115	return simd_register_skciphers_compat(serpent_algs,
116					      ARRAY_SIZE(serpent_algs),
117					      serpent_simd_algs);
118}
119
120static void __exit serpent_sse2_exit(void)
121{
122	simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
123				  serpent_simd_algs);
124}
125
126module_init(serpent_sse2_init);
127module_exit(serpent_sse2_exit);
128
129MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
130MODULE_LICENSE("GPL");
131MODULE_ALIAS_CRYPTO("serpent");
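
The "__ecb(serpent)" and "__cbc(serpent)" entries above are marked CRYPTO_ALG_INTERNAL; simd_register_skciphers_compat() wraps them so other kernel code reaches them under the plain "ecb(serpent)" and "cbc(serpent)" names, with the SSE2 path used only when the FPU is usable. A minimal in-kernel usage sketch follows; it is a hypothetical demo module, not part of this file, and names such as serpent_demo_init are made up for illustration:

/*
 * Hypothetical demo module (illustration only): encrypts one 64-byte
 * buffer with cbc(serpent), which the SIMD wrapper registered above
 * can service on SSE2-capable CPUs.
 */
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int __init serpent_demo_init(void)
{
	u8 key[32] = {};		/* all-zero 256-bit demo key */
	u8 iv[16] = {};			/* one Serpent block */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf;
	int err;

	tfm = crypto_alloc_skcipher("cbc(serpent)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* CBC needs a block-size multiple; use kmalloc'd memory for the SG list. */
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	sg_init_one(&sg, buf, 64);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	/* The SIMD wrapper may complete asynchronously; wait for it. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

static void __exit serpent_demo_exit(void)
{
}

module_init(serpent_demo_init);
module_exit(serpent_demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical cbc(serpent) usage demo");
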
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Glue Code for SSE2 assembler versions of Serpent Cipher
  4 *
  5 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
  6 *
  7 * Glue code based on aesni-intel_glue.c by:
  8 *  Copyright (C) 2008, Intel Corp.
  9 *    Author: Huang Ying <ying.huang@intel.com>
 10 *
 11 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 12 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 13 * CTR part based on code (crypto/ctr.c) by:
 14 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 15 */
 16
 17#include <linux/module.h>
 18#include <linux/types.h>
 19#include <linux/crypto.h>
 20#include <linux/err.h>
 21#include <crypto/algapi.h>
 22#include <crypto/b128ops.h>
 23#include <crypto/internal/simd.h>
 24#include <crypto/serpent.h>
 25#include <asm/crypto/serpent-sse2.h>
 26#include <asm/crypto/glue_helper.h>
 27
 28static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
 29				   const u8 *key, unsigned int keylen)
 30{
 31	return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
 32}
 33
 34static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
 35{
 36	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
 37	unsigned int j;
 38
 39	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
 40		ivs[j] = src[j];
 41
 42	serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
 43
 44	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
 45		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
 46}
 47
 48static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 49{
 50	be128 ctrblk;
 51
 52	le128_to_be128(&ctrblk, iv);
 53	le128_inc(iv);
 54
 55	__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
 56	u128_xor(dst, src, (u128 *)&ctrblk);
 57}
 58
 59static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
 60				   le128 *iv)
 61{
 62	be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
 63	unsigned int i;
 64
 65	for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
 66		if (dst != src)
 67			dst[i] = src[i];
 68
 69		le128_to_be128(&ctrblks[i], iv);
 70		le128_inc(iv);
 71	}
 72
 73	serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
 74}
 75
 76static const struct common_glue_ctx serpent_enc = {
 77	.num_funcs = 2,
 78	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
 79
 80	.funcs = { {
 81		.num_blocks = SERPENT_PARALLEL_BLOCKS,
 82		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
 83	}, {
 84		.num_blocks = 1,
 85		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
 86	} }
 87};
 88
 89static const struct common_glue_ctx serpent_ctr = {
 90	.num_funcs = 2,
 91	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
 92
 93	.funcs = { {
 94		.num_blocks = SERPENT_PARALLEL_BLOCKS,
 95		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
 96	}, {
 97		.num_blocks = 1,
 98		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
 99	} }
100};
101
102static const struct common_glue_ctx serpent_dec = {
103	.num_funcs = 2,
104	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
105
106	.funcs = { {
107		.num_blocks = SERPENT_PARALLEL_BLOCKS,
108		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
109	}, {
110		.num_blocks = 1,
111		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
112	} }
113};
114
115static const struct common_glue_ctx serpent_dec_cbc = {
116	.num_funcs = 2,
117	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
118
119	.funcs = { {
120		.num_blocks = SERPENT_PARALLEL_BLOCKS,
121		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
122	}, {
123		.num_blocks = 1,
124		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
125	} }
126};
127
128static int ecb_encrypt(struct skcipher_request *req)
129{
130	return glue_ecb_req_128bit(&serpent_enc, req);
131}
132
133static int ecb_decrypt(struct skcipher_request *req)
134{
135	return glue_ecb_req_128bit(&serpent_dec, req);
136}
137
138static int cbc_encrypt(struct skcipher_request *req)
139{
140	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
141					   req);
142}
143
144static int cbc_decrypt(struct skcipher_request *req)
145{
146	return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
147}
148
149static int ctr_crypt(struct skcipher_request *req)
150{
151	return glue_ctr_req_128bit(&serpent_ctr, req);
152}
153
154static struct skcipher_alg serpent_algs[] = {
155	{
156		.base.cra_name		= "__ecb(serpent)",
157		.base.cra_driver_name	= "__ecb-serpent-sse2",
158		.base.cra_priority	= 400,
159		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
160		.base.cra_blocksize	= SERPENT_BLOCK_SIZE,
161		.base.cra_ctxsize	= sizeof(struct serpent_ctx),
162		.base.cra_module	= THIS_MODULE,
163		.min_keysize		= SERPENT_MIN_KEY_SIZE,
164		.max_keysize		= SERPENT_MAX_KEY_SIZE,
165		.setkey			= serpent_setkey_skcipher,
166		.encrypt		= ecb_encrypt,
167		.decrypt		= ecb_decrypt,
168	}, {
169		.base.cra_name		= "__cbc(serpent)",
170		.base.cra_driver_name	= "__cbc-serpent-sse2",
171		.base.cra_priority	= 400,
172		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
173		.base.cra_blocksize	= SERPENT_BLOCK_SIZE,
174		.base.cra_ctxsize	= sizeof(struct serpent_ctx),
175		.base.cra_module	= THIS_MODULE,
176		.min_keysize		= SERPENT_MIN_KEY_SIZE,
177		.max_keysize		= SERPENT_MAX_KEY_SIZE,
178		.ivsize			= SERPENT_BLOCK_SIZE,
179		.setkey			= serpent_setkey_skcipher,
180		.encrypt		= cbc_encrypt,
181		.decrypt		= cbc_decrypt,
182	}, {
183		.base.cra_name		= "__ctr(serpent)",
184		.base.cra_driver_name	= "__ctr-serpent-sse2",
185		.base.cra_priority	= 400,
186		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
187		.base.cra_blocksize	= 1,
188		.base.cra_ctxsize	= sizeof(struct serpent_ctx),
189		.base.cra_module	= THIS_MODULE,
190		.min_keysize		= SERPENT_MIN_KEY_SIZE,
191		.max_keysize		= SERPENT_MAX_KEY_SIZE,
192		.ivsize			= SERPENT_BLOCK_SIZE,
193		.chunksize		= SERPENT_BLOCK_SIZE,
194		.setkey			= serpent_setkey_skcipher,
195		.encrypt		= ctr_crypt,
196		.decrypt		= ctr_crypt,
197	},
198};
199
200static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
201
202static int __init serpent_sse2_init(void)
203{
204	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
205		printk(KERN_INFO "SSE2 instructions are not detected.\n");
206		return -ENODEV;
207	}
208
209	return simd_register_skciphers_compat(serpent_algs,
210					      ARRAY_SIZE(serpent_algs),
211					      serpent_simd_algs);
212}
213
214static void __exit serpent_sse2_exit(void)
215{
216	simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
217				  serpent_simd_algs);
218}
219
220module_init(serpent_sse2_init);
221module_exit(serpent_sse2_exit);
222
223MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
224MODULE_LICENSE("GPL");
225MODULE_ALIAS_CRYPTO("serpent");
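
The v5.4 version additionally registers a CTR variant, which the newer listing above no longer provides. The skciphers registered here can also be reached from user space through the AF_ALG socket interface; the following is a hypothetical userspace smoke test, not part of the kernel tree, that encrypts one buffer with "ctr(serpent)". Error handling is omitted for brevity, and the all-zero key and counter block are placeholders:

/*
 * Hypothetical userspace smoke test: drives "ctr(serpent)" through the
 * AF_ALG socket interface. Error handling omitted for brevity.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "ctr(serpent)",
	};
	unsigned char key[32] = { 0 };	/* all-zero 256-bit demo key */
	unsigned char iv[16]  = { 0 };	/* initial counter block */
	unsigned char pt[32]  = "two Serpent blocks of plaintext";
	unsigned char ct[32];
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct af_alg_iv *alg_iv;
	struct cmsghdr *cmsg;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	/* First cmsg: select the encrypt operation. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_OP;
	cmsg->cmsg_len   = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* Second cmsg: pass the 16-byte counter block. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_IV;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(*alg_iv) + sizeof(iv));
	alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));

	for (unsigned int i = 0; i < sizeof(ct); i++)
		printf("%02x", ct[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}
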