// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>

#include "serpent-sse2.h"
#include "ecb_cbc_helpers.h"

static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen)
{
	return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
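
/*
 * CBC decryption helper: the SSE2 code decrypts SERPENT_PARALLEL_BLOCKS
 * blocks at once, and each block after the first must then be XORed with
 * the ciphertext block that preceded it.  When decrypting in place, the
 * ciphertext is copied to a stack buffer first so it is still available
 * for that XOR.  Chaining the very first block with the IV is done by the
 * CBC_DEC_BLOCK() caller below.
 */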
static void serpent_decrypt_cbc_xway(const void *ctx, u8 *dst, const u8 *src)
{
	u8 buf[SERPENT_PARALLEL_BLOCKS - 1][SERPENT_BLOCK_SIZE];
	const u8 *s = src;

	if (dst == src)
		s = memcpy(buf, src, sizeof(buf));
	serpent_dec_blk_xway(ctx, dst, src);
	crypto_xor(dst + SERPENT_BLOCK_SIZE, s, sizeof(buf));
}
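
/*
 * The ECB_*()/CBC_*() macros from ecb_cbc_helpers.h walk the request with
 * the skcipher walk API, enabling the FPU only when at least
 * SERPENT_PARALLEL_BLOCKS blocks are available, so the SSE2 routines
 * handle full multi-block chunks and the generic single-block functions
 * handle the tail.
 */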
static int ecb_encrypt(struct skcipher_request *req)
{
	ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
	ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_enc_blk_xway);
	ECB_BLOCK(1, __serpent_encrypt);
	ECB_WALK_END();
}

static int ecb_decrypt(struct skcipher_request *req)
{
	ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
	ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_dec_blk_xway);
	ECB_BLOCK(1, __serpent_decrypt);
	ECB_WALK_END();
}

static int cbc_encrypt(struct skcipher_request *req)
{
	CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
	CBC_ENC_BLOCK(__serpent_encrypt);
	CBC_WALK_END();
}

static int cbc_decrypt(struct skcipher_request *req)
{
	CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
	CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_decrypt_cbc_xway);
	CBC_DEC_BLOCK(1, __serpent_decrypt);
	CBC_WALK_END();
}
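
/*
 * These algorithms are marked CRYPTO_ALG_INTERNAL (and "__" prefixed) so
 * they are never selected directly; simd_register_skciphers_compat() wraps
 * them in simd skcipher instances that defer to cryptd when the FPU cannot
 * be used in the calling context.
 */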
static struct skcipher_alg serpent_algs[] = {
	{
		.base.cra_name = "__ecb(serpent)",
		.base.cra_driver_name = "__ecb-serpent-sse2",
		.base.cra_priority = 400,
		.base.cra_flags = CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize = SERPENT_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct serpent_ctx),
		.base.cra_module = THIS_MODULE,
		.min_keysize = SERPENT_MIN_KEY_SIZE,
		.max_keysize = SERPENT_MAX_KEY_SIZE,
		.setkey = serpent_setkey_skcipher,
		.encrypt = ecb_encrypt,
		.decrypt = ecb_decrypt,
	}, {
		.base.cra_name = "__cbc(serpent)",
		.base.cra_driver_name = "__cbc-serpent-sse2",
		.base.cra_priority = 400,
		.base.cra_flags = CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize = SERPENT_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct serpent_ctx),
		.base.cra_module = THIS_MODULE,
		.min_keysize = SERPENT_MIN_KEY_SIZE,
		.max_keysize = SERPENT_MAX_KEY_SIZE,
		.ivsize = SERPENT_BLOCK_SIZE,
		.setkey = serpent_setkey_skcipher,
		.encrypt = cbc_encrypt,
		.decrypt = cbc_decrypt,
	},
};

static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
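
/* Register only if the CPU actually supports SSE2 (X86_FEATURE_XMM2). */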
static int __init serpent_sse2_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	return simd_register_skciphers_compat(serpent_algs,
					      ARRAY_SIZE(serpent_algs),
					      serpent_simd_algs);
}

static void __exit serpent_sse2_exit(void)
{
	simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
				  serpent_simd_algs);
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("serpent");

/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include <asm/crypto/serpent-sse2.h>
#include <asm/crypto/glue_helper.h>

static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen)
{
	return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
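
/*
 * CBC decryption: save the source blocks first (they are the chaining
 * values for the following blocks and may be overwritten when src == dst),
 * run the parallel decryption, then XOR every block except the first with
 * the preceding ciphertext block.  The first block is chained with the IV
 * by the glue_helper CBC code.
 */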
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
	unsigned int j;

	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
		ivs[j] = src[j];

	serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}
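
/*
 * CTR mode, one block: the 128-bit counter kept in *iv is converted to
 * big endian, encrypted with Serpent, and the resulting keystream block
 * is XORed into the data; the counter is then incremented.
 */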
static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	be128 ctrblk;

	le128_to_be128(&ctrblk, iv);
	le128_inc(iv);

	__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
	u128_xor(dst, src, (u128 *)&ctrblk);
}
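
/*
 * Parallel CTR: build SERPENT_PARALLEL_BLOCKS counter blocks, then let
 * serpent_enc_blk_xway_xor() encrypt them and XOR the keystream into dst
 * in one pass.
 */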
static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
				   le128 *iv)
{
	be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
	unsigned int i;

	for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
		if (dst != src)
			dst[i] = src[i];

		le128_to_be128(&ctrblks[i], iv);
		le128_inc(iv);
	}

	serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
}
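
/*
 * glue_helper dispatch tables: entries are listed in order of decreasing
 * num_blocks, so the SSE2 multi-block routines are used whenever enough
 * data remains and the single-block C functions handle the tail;
 * fpu_blocks_limit tells glue_helper when enabling the FPU is worthwhile.
 */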
static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};

static const struct common_glue_ctx serpent_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
	} }
};

static const struct common_glue_ctx serpent_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
	} }
};

static const struct common_glue_ctx serpent_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
	} }
};

static int ecb_encrypt(struct skcipher_request *req)
{
	return glue_ecb_req_128bit(&serpent_enc, req);
}

static int ecb_decrypt(struct skcipher_request *req)
{
	return glue_ecb_req_128bit(&serpent_dec, req);
}

static int cbc_encrypt(struct skcipher_request *req)
{
	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
					   req);
}

static int cbc_decrypt(struct skcipher_request *req)
{
	return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
}
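
/* CTR encryption and decryption are the same operation. */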
static int ctr_crypt(struct skcipher_request *req)
{
	return glue_ctr_req_128bit(&serpent_ctr, req);
}

static struct skcipher_alg serpent_algs[] = {
	{
		.base.cra_name = "__ecb(serpent)",
		.base.cra_driver_name = "__ecb-serpent-sse2",
		.base.cra_priority = 400,
		.base.cra_flags = CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize = SERPENT_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct serpent_ctx),
		.base.cra_module = THIS_MODULE,
		.min_keysize = SERPENT_MIN_KEY_SIZE,
		.max_keysize = SERPENT_MAX_KEY_SIZE,
		.setkey = serpent_setkey_skcipher,
		.encrypt = ecb_encrypt,
		.decrypt = ecb_decrypt,
	}, {
		.base.cra_name = "__cbc(serpent)",
		.base.cra_driver_name = "__cbc-serpent-sse2",
		.base.cra_priority = 400,
		.base.cra_flags = CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize = SERPENT_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct serpent_ctx),
		.base.cra_module = THIS_MODULE,
		.min_keysize = SERPENT_MIN_KEY_SIZE,
		.max_keysize = SERPENT_MAX_KEY_SIZE,
		.ivsize = SERPENT_BLOCK_SIZE,
		.setkey = serpent_setkey_skcipher,
		.encrypt = cbc_encrypt,
		.decrypt = cbc_decrypt,
	}, {
		.base.cra_name = "__ctr(serpent)",
		.base.cra_driver_name = "__ctr-serpent-sse2",
		.base.cra_priority = 400,
		.base.cra_flags = CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize = 1,
		.base.cra_ctxsize = sizeof(struct serpent_ctx),
		.base.cra_module = THIS_MODULE,
		.min_keysize = SERPENT_MIN_KEY_SIZE,
		.max_keysize = SERPENT_MAX_KEY_SIZE,
		.ivsize = SERPENT_BLOCK_SIZE,
		.chunksize = SERPENT_BLOCK_SIZE,
		.setkey = serpent_setkey_skcipher,
		.encrypt = ctr_crypt,
		.decrypt = ctr_crypt,
	},
};

static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];

static int __init serpent_sse2_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	return simd_register_skciphers_compat(serpent_algs,
					      ARRAY_SIZE(serpent_algs),
					      serpent_simd_algs);
}

static void __exit serpent_sse2_exit(void)
{
	simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
				  serpent_simd_algs);
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("serpent");