// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linear symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers.
 *
 * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
	struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_lskcipher, base);
}

static inline struct lskcipher_alg *__crypto_lskcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct lskcipher_alg, co.base);
}

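/*
 * Handle a key buffer that violates the algorithm's alignmask: copy the key
 * into a temporary aligned buffer, run ->setkey() on the copy, then erase
 * and free the buffer so no key material is left behind.
 */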
static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

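/**
 * crypto_lskcipher_setkey - set the key for an lskcipher transform
 * @tfm: lskcipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The key length is checked against the algorithm's minimum and maximum key
 * size before being handed to the algorithm's ->setkey(), bouncing through
 * an aligned copy if the caller's pointer violates the alignmask.
 *
 * Return: 0 on success; -EINVAL for an out-of-range key length, otherwise
 * the error returned by the algorithm's ->setkey().
 */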
int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);

	if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		return lskcipher_setkey_unaligned(tfm, key, keylen);
	else
		return cipher->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);

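/*
 * Slow path for src/dst/iv pointers that violate the alignmask: bounce the
 * IV (plus any algorithm-internal state) and the data through temporary
 * page-sized buffers, processing at most one page per iteration and rounding
 * larger chunks down to a multiple of the cipher's chunk size.  A trailing
 * fragment shorter than the block size is rejected with -EINVAL.
 */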
static int crypto_lskcipher_crypt_unaligned(
	struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
	u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv, u32 flags))
{
	unsigned statesize = crypto_lskcipher_statesize(tfm);
	unsigned ivsize = crypto_lskcipher_ivsize(tfm);
	unsigned bs = crypto_lskcipher_blocksize(tfm);
	unsigned cs = crypto_lskcipher_chunksize(tfm);
	int err;
	u8 *tiv;
	u8 *p;

	BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
		     MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);

	tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!tiv)
		return -ENOMEM;

	memcpy(tiv, iv, ivsize + statesize);

	p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	err = -ENOMEM;
	if (!p)
		goto out;

	while (len >= bs) {
		unsigned chunk = min((unsigned)PAGE_SIZE, len);

		if (chunk > cs)
			chunk &= ~(cs - 1);

		memcpy(p, src, chunk);
		err = crypt(tfm, p, p, chunk, tiv, CRYPTO_LSKCIPHER_FLAG_FINAL);
		if (err)
			goto out;

		memcpy(dst, p, chunk);
		src += chunk;
		dst += chunk;
		len -= chunk;
	}

	err = len ? -EINVAL : 0;

out:
	memcpy(iv, tiv, ivsize + statesize);
	kfree_sensitive(p);
	kfree_sensitive(tiv);
	return err;
}

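/*
 * Common entry point for encryption and decryption: invoke the algorithm
 * directly when src, dst and iv all satisfy the alignmask, otherwise fall
 * back to the bounce-buffer path above.  The linear interface processes the
 * whole buffer in one call, so CRYPTO_LSKCIPHER_FLAG_FINAL is always set.
 */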
static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
				  u8 *dst, unsigned len, u8 *iv,
				  int (*crypt)(struct crypto_lskcipher *tfm,
					       const u8 *src, u8 *dst,
					       unsigned len, u8 *iv,
					       u32 flags))
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);

	if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
	    alignmask)
		return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
							crypt);

	return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
}

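/**
 * crypto_lskcipher_encrypt - encrypt a linear buffer
 * @tfm: lskcipher handle
 * @src: source buffer
 * @dst: destination buffer (may equal @src for in-place operation)
 * @len: number of bytes to process
 * @iv: IV, followed by any algorithm-internal state
 *
 * Return: 0 on success, otherwise a negative errno from the algorithm.
 */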
int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);

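/**
 * crypto_lskcipher_decrypt - decrypt a linear buffer
 * @tfm: lskcipher handle
 * @src: source buffer
 * @dst: destination buffer (may equal @src for in-place operation)
 * @len: number of bytes to process
 * @iv: IV, followed by any algorithm-internal state
 *
 * Return: 0 on success, otherwise a negative errno from the algorithm.
 */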
int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);

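/*
 * Bridge from the scatterlist-based skcipher API to an lskcipher: walk the
 * request with skcipher_walk_virt() and feed each linear segment to the
 * lskcipher, keeping the IV/state in the (suitably aligned) request context.
 * The request-level CONT/NOTFINAL flags are mapped onto the lskcipher flags,
 * FINAL is cleared for every segment except the last, and CONT is set for
 * all segments after the first so the cipher sees one continuous stream.
 */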
static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
				     int (*crypt)(struct crypto_lskcipher *tfm,
						  const u8 *src, u8 *dst,
						  unsigned len, u8 *ivs,
						  u32 flags))
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	u8 *ivs = skcipher_request_ctx(req);
	struct crypto_lskcipher *tfm = *ctx;
	struct skcipher_walk walk;
	unsigned ivsize;
	u32 flags;
	int err;

	ivsize = crypto_lskcipher_ivsize(tfm);
	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1);
	memcpy(ivs, req->iv, ivsize);

	flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	if (req->base.flags & CRYPTO_SKCIPHER_REQ_CONT)
		flags |= CRYPTO_LSKCIPHER_FLAG_CONT;

	if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
		flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
			    walk.nbytes, ivs,
			    flags & ~(walk.nbytes == walk.total ?
				      0 : CRYPTO_LSKCIPHER_FLAG_FINAL));
		err = skcipher_walk_done(&walk, err);
		flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
	}

	memcpy(req->iv, ivs, ivsize);

	return err;
}

int crypto_lskcipher_encrypt_sg(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

	return crypto_lskcipher_crypt_sg(req, alg->encrypt);
}

int crypto_lskcipher_decrypt_sg(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

	return crypto_lskcipher_crypt_sg(req, alg->decrypt);
}

static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_lskcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_lskcipher_free_instance(struct crypto_instance *inst)
{
	struct lskcipher_instance *skcipher =
		container_of(inst, struct lskcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void __maybe_unused crypto_lskcipher_show(
	struct seq_file *m, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);

	seq_printf(m, "type         : lskcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->co.min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->co.max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
	seq_printf(m, "statesize    : %u\n", skcipher->co.statesize);
}

static int __maybe_unused crypto_lskcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->co.min_keysize;
	rblkcipher.max_keysize = skcipher->co.max_keysize;
	rblkcipher.ivsize = skcipher->co.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static const struct crypto_type crypto_lskcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_lskcipher_init_tfm,
	.free = crypto_lskcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_lskcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_lskcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_LSKCIPHER,
	.tfmsize = offsetof(struct crypto_lskcipher, base),
};

static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_lskcipher(*ctx);
}

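/*
 * Instantiate an lskcipher behind a legacy crypto_tfm so that an lskcipher
 * algorithm can service scatterlist-based users: allocate the inner
 * lskcipher transform, stash it in the tfm context, and install the exit
 * handler that frees it again.
 */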
int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_lskcipher *skcipher;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type);
	if (IS_ERR(skcipher)) {
		crypto_mod_put(calg);
		return PTR_ERR(skcipher);
	}

	*ctx = skcipher;
	tfm->exit = crypto_lskcipher_exit_tfm_sg;

	return 0;
}

int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
			  struct crypto_instance *inst,
			  const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_lskcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_lskcipher);

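/**
 * crypto_alloc_lskcipher - allocate a linear symmetric key cipher handle
 * @alg_name: name of the algorithm, e.g. "cbc(aes)"
 * @type: algorithm type flags
 * @mask: algorithm type mask
 *
 * The handle must be released with crypto_free_lskcipher() when no longer
 * needed.  A rough usage sketch (error handling abbreviated; the algorithm
 * name and the key/IV/buffer sizes below are illustrative only):
 *
 *	struct crypto_lskcipher *tfm;
 *	u8 key[16], iv[16], buf[64];
 *	int err;
 *
 *	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_lskcipher_setkey(tfm, key, sizeof(key));
 *	if (!err)
 *		err = crypto_lskcipher_encrypt(tfm, buf, buf, sizeof(buf), iv);
 *	crypto_free_lskcipher(tfm);
 *
 * Return: a cipher handle on success, or an ERR_PTR() on failure.
 */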
struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
						u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher);

static int lskcipher_prepare_alg(struct lskcipher_alg *alg)
{
	struct crypto_alg *base = &alg->co.base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->co.chunksize & (alg->co.chunksize - 1))
		return -EINVAL;

	base->cra_type = &crypto_lskcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER;

	return 0;
}

int crypto_register_lskcipher(struct lskcipher_alg *alg)
{
	struct crypto_alg *base = &alg->co.base;
	int err;

	err = lskcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_lskcipher);

void crypto_unregister_lskcipher(struct lskcipher_alg *alg)
{
	crypto_unregister_alg(&alg->co.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher);

int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_lskcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_lskcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_lskciphers);

void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_lskcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers);

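/*
 * Register a template instance whose algorithm is an lskcipher.  The
 * instance must provide a ->free() callback before registration.
 */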
int lskcipher_register_instance(struct crypto_template *tmpl,
				struct lskcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = lskcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(lskcipher_register_instance);

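/*
 * Default ->setkey() for instances created by
 * lskcipher_alloc_instance_simple(): propagate the CRYPTO_TFM_REQ_* flags to
 * the underlying lskcipher held in the tfm context and forward the key to it.
 */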
static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key,
				   unsigned int keylen)
{
	struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm);

	crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
				   CRYPTO_TFM_REQ_MASK);
	return crypto_lskcipher_setkey(cipher, key, keylen);
}

static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm)
{
	struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
	struct crypto_lskcipher_spawn *spawn;
	struct crypto_lskcipher *cipher;

	spawn = lskcipher_instance_ctx(inst);
	cipher = crypto_spawn_lskcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	*ctx = cipher;
	return 0;
}

static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm)
{
	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);

	crypto_free_lskcipher(*ctx);
}

static void lskcipher_free_instance_simple(struct lskcipher_instance *inst)
{
	crypto_drop_lskcipher(lskcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * lskcipher_alloc_instance_simple - allocate instance of simple block cipher
 *
 * Allocate an lskcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
 * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed. The tfm context defaults to
 * struct crypto_lskcipher *, and default ->setkey(), ->init(), and
 * ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
 * needs to register the instance.
 */
struct lskcipher_instance *lskcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct lskcipher_instance *inst;
	struct crypto_lskcipher_spawn *spawn;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	struct lskcipher_alg *cipher_alg;
	const char *cipher_name;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return ERR_CAST(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = lskcipher_instance_ctx(inst);
	err = crypto_grab_lskcipher(spawn,
				    lskcipher_crypto_instance(inst),
				    cipher_name, 0, mask);

	ecb_name[0] = 0;
	if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_lskcipher(spawn,
					    lskcipher_crypto_instance(inst),
					    ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	cipher_alg = crypto_lskcipher_spawn_alg(spawn);

	err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
				  &cipher_alg->co.base);
	if (err)
		goto err_free_inst;

	if (ecb_name[0]) {
		int len;

		err = -EINVAL;
		len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4],
			      sizeof(ecb_name));
		if (len < 2)
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, ecb_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		if (strcmp(ecb_name, cipher_name) &&
		    snprintf(inst->alg.co.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, cipher_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	} else {
		/* Don't allow nesting. */
		err = -ELOOP;
		if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
			goto err_free_inst;
	}

	err = -EINVAL;
	if (cipher_alg->co.ivsize)
		goto err_free_inst;

	inst->free = lskcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize;
	inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask;
	inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
	inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
	inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
	inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;
	inst->alg.co.statesize = cipher_alg->co.statesize;

	/* Use struct crypto_lskcipher * by default, can be overridden */
	inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
	inst->alg.setkey = lskcipher_setkey_simple;
	inst->alg.init = lskcipher_init_tfm_simple;
	inst->alg.exit = lskcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	lskcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);