// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers; handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

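/*
 * Walk flags, as used below:
 * PHYS  - caller works on page/offset pairs (async walk); writes are
 *         queued as skcipher_walk_buffer entries and flushed in
 *         skcipher_walk_complete().
 * SLOW  - current step is bounced through a buffer because fewer than
 *         bsize contiguous bytes were available in the scatterlists.
 * COPY  - current step is bounced through walk->page to satisfy the
 *         algorithm's alignment mask.
 * DIFF  - source and destination are mapped separately.
 * SLEEP - the walk may sleep; allocations can use GFP_KERNEL.
 */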
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
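/*
 * Worked example (illustrative): with 4 KiB pages, start == 0x...ff8 and
 * len == 16, end_page == ((start + 15) & PAGE_MASK) is the next page
 * boundary, which is greater than start, so the spot moves up to that
 * boundary and the 16 bytes fit entirely within the new page.  When
 * start + len - 1 is still on start's page, end_page <= start and start
 * is returned unchanged.
 */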

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
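/*
 * Note: a non-negative err is interpreted as the number of bytes the caller
 * left unprocessed in this step (hence the "n -= err" above); callers pass 0
 * when they consumed all of walk->nbytes.  A positive remainder is only
 * valid outside the slow (bounce-buffer) path, as the branch above enforces.
 * A negative err aborts the walk and is returned to the caller.
 */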

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}
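	/*
	 * Bit trick used above: sizeof(*p) ^ (sizeof(*p) - 1) sets every bit
	 * from the lowest set bit of sizeof(*p) downwards, so shifting right
	 * by one yields (lowest set bit) - 1.  For example, sizeof(*p) == 40
	 * (0b101000) gives 0b001111 >> 1 == 7: p->buffer, which starts right
	 * after a 40-byte header, can only be assumed 8-byte aligned.
	 */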

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
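/*
 * Usage sketch (illustrative only; my_cipher_blocks and ctx are placeholder
 * names, not part of this file): a typical synchronous driver iterates the
 * walk until no bytes remain, telling skcipher_walk_done() how much of each
 * step it could not process:
 *
 *	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *	unsigned int bsize = crypto_skcipher_blocksize(tfm);
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		my_cipher_blocks(ctx, walk.dst.virt.addr,
 *				 walk.src.virt.addr, n, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */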

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

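	/*
	 * Skip over the associated data: an "out" argument of 2 makes
	 * scatterwalk_copychunks() advance the walk without copying, so
	 * the walk starts at the plaintext/ciphertext proper.
	 */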
	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
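/*
 * The two AEAD entry points differ only in walk->total: on decryption,
 * req->cryptlen still includes the authentication tag, so
 * crypto_aead_authsize() bytes are excluded from the walk.  Verifying the
 * tag is the AEAD implementation's job, not the walker's.
 */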

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
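/*
 * Usage sketch for API consumers (illustrative; "cbc(aes)" and the buffer
 * names are placeholders, and error handling is elided): a one-shot
 * encryption with the async API, using the crypto_wait helpers to block
 * until completion:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */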

static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
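/*
 * Usage sketch (illustrative; error handling elided): sync tfms are meant
 * to be used with on-stack requests, which is why the request size is
 * bounded above:
 *
 *	struct crypto_sync_skcipher *tfm;
 *
 *	tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 *	crypto_sync_skcipher_setkey(tfm, key, keylen);
 *	{
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, sg, sg, len, iv);
 *		crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *	}
 *	crypto_free_sync_skcipher(tfm);
 */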

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
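/*
 * Usage sketch (illustrative; modelled on how simple mode templates use
 * this helper - the crypto_ecb_* names are placeholders): a template's
 * ->create() fills in encrypt/decrypt and registers the instance:
 *
 *	static int crypto_ecb_create(struct crypto_template *tmpl,
 *				     struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.ivsize = 0;	// ECB takes no IV
 *		inst->alg.encrypt = crypto_ecb_encrypt;
 *		inst->alg.decrypt = crypto_ecb_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */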

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Symmetric key cipher operations.
4 *
5 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
6 * multiple page boundaries by using temporary blocks. In user context,
7 * the kernel is given a chance to schedule us once per page.
8 *
9 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
10 */
11
12#include <crypto/internal/aead.h>
13#include <crypto/internal/skcipher.h>
14#include <crypto/scatterwalk.h>
15#include <linux/bug.h>
16#include <linux/cryptouser.h>
17#include <linux/compiler.h>
18#include <linux/list.h>
19#include <linux/module.h>
20#include <linux/rtnetlink.h>
21#include <linux/seq_file.h>
22#include <net/netlink.h>
23
24#include "internal.h"
25
26enum {
27 SKCIPHER_WALK_PHYS = 1 << 0,
28 SKCIPHER_WALK_SLOW = 1 << 1,
29 SKCIPHER_WALK_COPY = 1 << 2,
30 SKCIPHER_WALK_DIFF = 1 << 3,
31 SKCIPHER_WALK_SLEEP = 1 << 4,
32};
33
34struct skcipher_walk_buffer {
35 struct list_head entry;
36 struct scatter_walk dst;
37 unsigned int len;
38 u8 *data;
39 u8 buffer[];
40};
41
42static int skcipher_walk_next(struct skcipher_walk *walk);
43
44static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
45{
46 if (PageHighMem(scatterwalk_page(walk)))
47 kunmap_atomic(vaddr);
48}
49
50static inline void *skcipher_map(struct scatter_walk *walk)
51{
52 struct page *page = scatterwalk_page(walk);
53
54 return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
55 offset_in_page(walk->offset);
56}
57
58static inline void skcipher_map_src(struct skcipher_walk *walk)
59{
60 walk->src.virt.addr = skcipher_map(&walk->in);
61}
62
63static inline void skcipher_map_dst(struct skcipher_walk *walk)
64{
65 walk->dst.virt.addr = skcipher_map(&walk->out);
66}
67
68static inline void skcipher_unmap_src(struct skcipher_walk *walk)
69{
70 skcipher_unmap(&walk->in, walk->src.virt.addr);
71}
72
73static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
74{
75 skcipher_unmap(&walk->out, walk->dst.virt.addr);
76}
77
78static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
79{
80 return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
81}
82
83/* Get a spot of the specified length that does not straddle a page.
84 * The caller needs to ensure that there is enough space for this operation.
85 */
86static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
87{
88 u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
89
90 return max(start, end_page);
91}
92
93static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
94{
95 u8 *addr;
96
97 addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
98 addr = skcipher_get_spot(addr, bsize);
99 scatterwalk_copychunks(addr, &walk->out, bsize,
100 (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
101 return 0;
102}
103
104int skcipher_walk_done(struct skcipher_walk *walk, int err)
105{
106 unsigned int n = walk->nbytes;
107 unsigned int nbytes = 0;
108
109 if (!n)
110 goto finish;
111
112 if (likely(err >= 0)) {
113 n -= err;
114 nbytes = walk->total - n;
115 }
116
117 if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
118 SKCIPHER_WALK_SLOW |
119 SKCIPHER_WALK_COPY |
120 SKCIPHER_WALK_DIFF)))) {
121unmap_src:
122 skcipher_unmap_src(walk);
123 } else if (walk->flags & SKCIPHER_WALK_DIFF) {
124 skcipher_unmap_dst(walk);
125 goto unmap_src;
126 } else if (walk->flags & SKCIPHER_WALK_COPY) {
127 skcipher_map_dst(walk);
128 memcpy(walk->dst.virt.addr, walk->page, n);
129 skcipher_unmap_dst(walk);
130 } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
131 if (err > 0) {
132 /*
133 * Didn't process all bytes. Either the algorithm is
134 * broken, or this was the last step and it turned out
135 * the message wasn't evenly divisible into blocks but
136 * the algorithm requires it.
137 */
138 err = -EINVAL;
139 nbytes = 0;
140 } else
141 n = skcipher_done_slow(walk, n);
142 }
143
144 if (err > 0)
145 err = 0;
146
147 walk->total = nbytes;
148 walk->nbytes = 0;
149
150 scatterwalk_advance(&walk->in, n);
151 scatterwalk_advance(&walk->out, n);
152 scatterwalk_done(&walk->in, 0, nbytes);
153 scatterwalk_done(&walk->out, 1, nbytes);
154
155 if (nbytes) {
156 crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
157 CRYPTO_TFM_REQ_MAY_SLEEP : 0);
158 return skcipher_walk_next(walk);
159 }
160
161finish:
162 /* Short-circuit for the common/fast path. */
163 if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
164 goto out;
165
166 if (walk->flags & SKCIPHER_WALK_PHYS)
167 goto out;
168
169 if (walk->iv != walk->oiv)
170 memcpy(walk->oiv, walk->iv, walk->ivsize);
171 if (walk->buffer != walk->page)
172 kfree(walk->buffer);
173 if (walk->page)
174 free_page((unsigned long)walk->page);
175
176out:
177 return err;
178}
179EXPORT_SYMBOL_GPL(skcipher_walk_done);
180
181void skcipher_walk_complete(struct skcipher_walk *walk, int err)
182{
183 struct skcipher_walk_buffer *p, *tmp;
184
185 list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
186 u8 *data;
187
188 if (err)
189 goto done;
190
191 data = p->data;
192 if (!data) {
193 data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
194 data = skcipher_get_spot(data, walk->stride);
195 }
196
197 scatterwalk_copychunks(data, &p->dst, p->len, 1);
198
199 if (offset_in_page(p->data) + p->len + walk->stride >
200 PAGE_SIZE)
201 free_page((unsigned long)p->data);
202
203done:
204 list_del(&p->entry);
205 kfree(p);
206 }
207
208 if (!err && walk->iv != walk->oiv)
209 memcpy(walk->oiv, walk->iv, walk->ivsize);
210 if (walk->buffer != walk->page)
211 kfree(walk->buffer);
212 if (walk->page)
213 free_page((unsigned long)walk->page);
214}
215EXPORT_SYMBOL_GPL(skcipher_walk_complete);
216
217static void skcipher_queue_write(struct skcipher_walk *walk,
218 struct skcipher_walk_buffer *p)
219{
220 p->dst = walk->out;
221 list_add_tail(&p->entry, &walk->buffers);
222}
223
224static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
225{
226 bool phys = walk->flags & SKCIPHER_WALK_PHYS;
227 unsigned alignmask = walk->alignmask;
228 struct skcipher_walk_buffer *p;
229 unsigned a;
230 unsigned n;
231 u8 *buffer;
232 void *v;
233
234 if (!phys) {
235 if (!walk->buffer)
236 walk->buffer = walk->page;
237 buffer = walk->buffer;
238 if (buffer)
239 goto ok;
240 }
241
242 /* Start with the minimum alignment of kmalloc. */
243 a = crypto_tfm_ctx_alignment() - 1;
244 n = bsize;
245
246 if (phys) {
247 /* Calculate the minimum alignment of p->buffer. */
248 a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
249 n += sizeof(*p);
250 }
251
252 /* Minimum size to align p->buffer by alignmask. */
253 n += alignmask & ~a;
254
255 /* Minimum size to ensure p->buffer does not straddle a page. */
256 n += (bsize - 1) & ~(alignmask | a);
257
258 v = kzalloc(n, skcipher_walk_gfp(walk));
259 if (!v)
260 return skcipher_walk_done(walk, -ENOMEM);
261
262 if (phys) {
263 p = v;
264 p->len = bsize;
265 skcipher_queue_write(walk, p);
266 buffer = p->buffer;
267 } else {
268 walk->buffer = v;
269 buffer = v;
270 }
271
272ok:
273 walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
274 walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
275 walk->src.virt.addr = walk->dst.virt.addr;
276
277 scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
278
279 walk->nbytes = bsize;
280 walk->flags |= SKCIPHER_WALK_SLOW;
281
282 return 0;
283}
284
285static int skcipher_next_copy(struct skcipher_walk *walk)
286{
287 struct skcipher_walk_buffer *p;
288 u8 *tmp = walk->page;
289
290 skcipher_map_src(walk);
291 memcpy(tmp, walk->src.virt.addr, walk->nbytes);
292 skcipher_unmap_src(walk);
293
294 walk->src.virt.addr = tmp;
295 walk->dst.virt.addr = tmp;
296
297 if (!(walk->flags & SKCIPHER_WALK_PHYS))
298 return 0;
299
300 p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
301 if (!p)
302 return -ENOMEM;
303
304 p->data = walk->page;
305 p->len = walk->nbytes;
306 skcipher_queue_write(walk, p);
307
308 if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
309 PAGE_SIZE)
310 walk->page = NULL;
311 else
312 walk->page += walk->nbytes;
313
314 return 0;
315}
316
317static int skcipher_next_fast(struct skcipher_walk *walk)
318{
319 unsigned long diff;
320
321 walk->src.phys.page = scatterwalk_page(&walk->in);
322 walk->src.phys.offset = offset_in_page(walk->in.offset);
323 walk->dst.phys.page = scatterwalk_page(&walk->out);
324 walk->dst.phys.offset = offset_in_page(walk->out.offset);
325
326 if (walk->flags & SKCIPHER_WALK_PHYS)
327 return 0;
328
329 diff = walk->src.phys.offset - walk->dst.phys.offset;
330 diff |= walk->src.virt.page - walk->dst.virt.page;
331
332 skcipher_map_src(walk);
333 walk->dst.virt.addr = walk->src.virt.addr;
334
335 if (diff) {
336 walk->flags |= SKCIPHER_WALK_DIFF;
337 skcipher_map_dst(walk);
338 }
339
340 return 0;
341}
342
343static int skcipher_walk_next(struct skcipher_walk *walk)
344{
345 unsigned int bsize;
346 unsigned int n;
347 int err;
348
349 walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
350 SKCIPHER_WALK_DIFF);
351
352 n = walk->total;
353 bsize = min(walk->stride, max(n, walk->blocksize));
354 n = scatterwalk_clamp(&walk->in, n);
355 n = scatterwalk_clamp(&walk->out, n);
356
357 if (unlikely(n < bsize)) {
358 if (unlikely(walk->total < walk->blocksize))
359 return skcipher_walk_done(walk, -EINVAL);
360
361slow_path:
362 err = skcipher_next_slow(walk, bsize);
363 goto set_phys_lowmem;
364 }
365
366 if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
367 if (!walk->page) {
368 gfp_t gfp = skcipher_walk_gfp(walk);
369
370 walk->page = (void *)__get_free_page(gfp);
371 if (!walk->page)
372 goto slow_path;
373 }
374
375 walk->nbytes = min_t(unsigned, n,
376 PAGE_SIZE - offset_in_page(walk->page));
377 walk->flags |= SKCIPHER_WALK_COPY;
378 err = skcipher_next_copy(walk);
379 goto set_phys_lowmem;
380 }
381
382 walk->nbytes = n;
383
384 return skcipher_next_fast(walk);
385
386set_phys_lowmem:
387 if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
388 walk->src.phys.page = virt_to_page(walk->src.virt.addr);
389 walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
390 walk->src.phys.offset &= PAGE_SIZE - 1;
391 walk->dst.phys.offset &= PAGE_SIZE - 1;
392 }
393 return err;
394}
395
396static int skcipher_copy_iv(struct skcipher_walk *walk)
397{
398 unsigned a = crypto_tfm_ctx_alignment() - 1;
399 unsigned alignmask = walk->alignmask;
400 unsigned ivsize = walk->ivsize;
401 unsigned bs = walk->stride;
402 unsigned aligned_bs;
403 unsigned size;
404 u8 *iv;
405
406 aligned_bs = ALIGN(bs, alignmask + 1);
407
408 /* Minimum size to align buffer by alignmask. */
409 size = alignmask & ~a;
410
411 if (walk->flags & SKCIPHER_WALK_PHYS)
412 size += ivsize;
413 else {
414 size += aligned_bs + ivsize;
415
416 /* Minimum size to ensure buffer does not straddle a page. */
417 size += (bs - 1) & ~(alignmask | a);
418 }
419
420 walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
421 if (!walk->buffer)
422 return -ENOMEM;
423
424 iv = PTR_ALIGN(walk->buffer, alignmask + 1);
425 iv = skcipher_get_spot(iv, bs) + aligned_bs;
426
427 walk->iv = memcpy(iv, walk->iv, walk->ivsize);
428 return 0;
429}
430
431static int skcipher_walk_first(struct skcipher_walk *walk)
432{
433 if (WARN_ON_ONCE(in_irq()))
434 return -EDEADLK;
435
436 walk->buffer = NULL;
437 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
438 int err = skcipher_copy_iv(walk);
439 if (err)
440 return err;
441 }
442
443 walk->page = NULL;
444
445 return skcipher_walk_next(walk);
446}
447
448static int skcipher_walk_skcipher(struct skcipher_walk *walk,
449 struct skcipher_request *req)
450{
451 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
452
453 walk->total = req->cryptlen;
454 walk->nbytes = 0;
455 walk->iv = req->iv;
456 walk->oiv = req->iv;
457
458 if (unlikely(!walk->total))
459 return 0;
460
461 scatterwalk_start(&walk->in, req->src);
462 scatterwalk_start(&walk->out, req->dst);
463
464 walk->flags &= ~SKCIPHER_WALK_SLEEP;
465 walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
466 SKCIPHER_WALK_SLEEP : 0;
467
468 walk->blocksize = crypto_skcipher_blocksize(tfm);
469 walk->stride = crypto_skcipher_walksize(tfm);
470 walk->ivsize = crypto_skcipher_ivsize(tfm);
471 walk->alignmask = crypto_skcipher_alignmask(tfm);
472
473 return skcipher_walk_first(walk);
474}
475
476int skcipher_walk_virt(struct skcipher_walk *walk,
477 struct skcipher_request *req, bool atomic)
478{
479 int err;
480
481 might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
482
483 walk->flags &= ~SKCIPHER_WALK_PHYS;
484
485 err = skcipher_walk_skcipher(walk, req);
486
487 walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
488
489 return err;
490}
491EXPORT_SYMBOL_GPL(skcipher_walk_virt);
492
493void skcipher_walk_atomise(struct skcipher_walk *walk)
494{
495 walk->flags &= ~SKCIPHER_WALK_SLEEP;
496}
497EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
498
499int skcipher_walk_async(struct skcipher_walk *walk,
500 struct skcipher_request *req)
501{
502 walk->flags |= SKCIPHER_WALK_PHYS;
503
504 INIT_LIST_HEAD(&walk->buffers);
505
506 return skcipher_walk_skcipher(walk, req);
507}
508EXPORT_SYMBOL_GPL(skcipher_walk_async);
509
510static int skcipher_walk_aead_common(struct skcipher_walk *walk,
511 struct aead_request *req, bool atomic)
512{
513 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
514 int err;
515
516 walk->nbytes = 0;
517 walk->iv = req->iv;
518 walk->oiv = req->iv;
519
520 if (unlikely(!walk->total))
521 return 0;
522
523 walk->flags &= ~SKCIPHER_WALK_PHYS;
524
525 scatterwalk_start(&walk->in, req->src);
526 scatterwalk_start(&walk->out, req->dst);
527
528 scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
529 scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
530
531 scatterwalk_done(&walk->in, 0, walk->total);
532 scatterwalk_done(&walk->out, 0, walk->total);
533
534 if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
535 walk->flags |= SKCIPHER_WALK_SLEEP;
536 else
537 walk->flags &= ~SKCIPHER_WALK_SLEEP;
538
539 walk->blocksize = crypto_aead_blocksize(tfm);
540 walk->stride = crypto_aead_chunksize(tfm);
541 walk->ivsize = crypto_aead_ivsize(tfm);
542 walk->alignmask = crypto_aead_alignmask(tfm);
543
544 err = skcipher_walk_first(walk);
545
546 if (atomic)
547 walk->flags &= ~SKCIPHER_WALK_SLEEP;
548
549 return err;
550}
551
552int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
553 bool atomic)
554{
555 walk->total = req->cryptlen;
556
557 return skcipher_walk_aead_common(walk, req, atomic);
558}
559EXPORT_SYMBOL_GPL(skcipher_walk_aead);
560
561int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
562 struct aead_request *req, bool atomic)
563{
564 walk->total = req->cryptlen;
565
566 return skcipher_walk_aead_common(walk, req, atomic);
567}
568EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
569
570int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
571 struct aead_request *req, bool atomic)
572{
573 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
574
575 walk->total = req->cryptlen - crypto_aead_authsize(tfm);
576
577 return skcipher_walk_aead_common(walk, req, atomic);
578}
579EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
580
581static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
582{
583 if (alg->cra_type == &crypto_blkcipher_type)
584 return sizeof(struct crypto_blkcipher *);
585
586 if (alg->cra_type == &crypto_ablkcipher_type)
587 return sizeof(struct crypto_ablkcipher *);
588
589 return crypto_alg_extsize(alg);
590}
591
592static void skcipher_set_needkey(struct crypto_skcipher *tfm)
593{
594 if (tfm->keysize)
595 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
596}
597
598static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
599 const u8 *key, unsigned int keylen)
600{
601 struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
602 struct crypto_blkcipher *blkcipher = *ctx;
603 int err;
604
605 crypto_blkcipher_clear_flags(blkcipher, ~0);
606 crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
607 CRYPTO_TFM_REQ_MASK);
608 err = crypto_blkcipher_setkey(blkcipher, key, keylen);
609 crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
610 CRYPTO_TFM_RES_MASK);
611 if (unlikely(err)) {
612 skcipher_set_needkey(tfm);
613 return err;
614 }
615
616 crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
617 return 0;
618}
619
620static int skcipher_crypt_blkcipher(struct skcipher_request *req,
621 int (*crypt)(struct blkcipher_desc *,
622 struct scatterlist *,
623 struct scatterlist *,
624 unsigned int))
625{
626 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
627 struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
628 struct blkcipher_desc desc = {
629 .tfm = *ctx,
630 .info = req->iv,
631 .flags = req->base.flags,
632 };
633
634
635 return crypt(&desc, req->dst, req->src, req->cryptlen);
636}
637
638static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
639{
640 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
641 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
642 struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
643
644 return skcipher_crypt_blkcipher(req, alg->encrypt);
645}
646
647static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
648{
649 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
650 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
651 struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
652
653 return skcipher_crypt_blkcipher(req, alg->decrypt);
654}
655
656static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
657{
658 struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
659
660 crypto_free_blkcipher(*ctx);
661}
662
663static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
664{
665 struct crypto_alg *calg = tfm->__crt_alg;
666 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
667 struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
668 struct crypto_blkcipher *blkcipher;
669 struct crypto_tfm *btfm;
670
671 if (!crypto_mod_get(calg))
672 return -EAGAIN;
673
674 btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
675 CRYPTO_ALG_TYPE_MASK);
676 if (IS_ERR(btfm)) {
677 crypto_mod_put(calg);
678 return PTR_ERR(btfm);
679 }
680
681 blkcipher = __crypto_blkcipher_cast(btfm);
682 *ctx = blkcipher;
683 tfm->exit = crypto_exit_skcipher_ops_blkcipher;
684
685 skcipher->setkey = skcipher_setkey_blkcipher;
686 skcipher->encrypt = skcipher_encrypt_blkcipher;
687 skcipher->decrypt = skcipher_decrypt_blkcipher;
688
689 skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
690 skcipher->keysize = calg->cra_blkcipher.max_keysize;
691
692 skcipher_set_needkey(skcipher);
693
694 return 0;
695}
696
697static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
698 const u8 *key, unsigned int keylen)
699{
700 struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
701 struct crypto_ablkcipher *ablkcipher = *ctx;
702 int err;
703
704 crypto_ablkcipher_clear_flags(ablkcipher, ~0);
705 crypto_ablkcipher_set_flags(ablkcipher,
706 crypto_skcipher_get_flags(tfm) &
707 CRYPTO_TFM_REQ_MASK);
708 err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
709 crypto_skcipher_set_flags(tfm,
710 crypto_ablkcipher_get_flags(ablkcipher) &
711 CRYPTO_TFM_RES_MASK);
712 if (unlikely(err)) {
713 skcipher_set_needkey(tfm);
714 return err;
715 }
716
717 crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
718 return 0;
719}
720
721static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
722 int (*crypt)(struct ablkcipher_request *))
723{
724 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
725 struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
726 struct ablkcipher_request *subreq = skcipher_request_ctx(req);
727
728 ablkcipher_request_set_tfm(subreq, *ctx);
729 ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
730 req->base.complete, req->base.data);
731 ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
732 req->iv);
733
734 return crypt(subreq);
735}
736
737static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
738{
739 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
740 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
741 struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
742
743 return skcipher_crypt_ablkcipher(req, alg->encrypt);
744}
745
746static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
747{
748 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
749 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
750 struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
751
752 return skcipher_crypt_ablkcipher(req, alg->decrypt);
753}
754
755static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
756{
757 struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
758
759 crypto_free_ablkcipher(*ctx);
760}
761
762static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
763{
764 struct crypto_alg *calg = tfm->__crt_alg;
765 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
766 struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
767 struct crypto_ablkcipher *ablkcipher;
768 struct crypto_tfm *abtfm;
769
770 if (!crypto_mod_get(calg))
771 return -EAGAIN;
772
773 abtfm = __crypto_alloc_tfm(calg, 0, 0);
774 if (IS_ERR(abtfm)) {
775 crypto_mod_put(calg);
776 return PTR_ERR(abtfm);
777 }
778
779 ablkcipher = __crypto_ablkcipher_cast(abtfm);
780 *ctx = ablkcipher;
781 tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
782
783 skcipher->setkey = skcipher_setkey_ablkcipher;
784 skcipher->encrypt = skcipher_encrypt_ablkcipher;
785 skcipher->decrypt = skcipher_decrypt_ablkcipher;
786
787 skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
788 skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
789 sizeof(struct ablkcipher_request);
790 skcipher->keysize = calg->cra_ablkcipher.max_keysize;
791
792 skcipher_set_needkey(skcipher);
793
794 return 0;
795}
796
797static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
798 const u8 *key, unsigned int keylen)
799{
800 unsigned long alignmask = crypto_skcipher_alignmask(tfm);
801 struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
802 u8 *buffer, *alignbuffer;
803 unsigned long absize;
804 int ret;
805
806 absize = keylen + alignmask;
807 buffer = kmalloc(absize, GFP_ATOMIC);
808 if (!buffer)
809 return -ENOMEM;
810
811 alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
812 memcpy(alignbuffer, key, keylen);
813 ret = cipher->setkey(tfm, alignbuffer, keylen);
814 kzfree(buffer);
815 return ret;
816}
817
818static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
819 unsigned int keylen)
820{
821 struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
822 unsigned long alignmask = crypto_skcipher_alignmask(tfm);
823 int err;
824
825 if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
826 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
827 return -EINVAL;
828 }
829
830 if ((unsigned long)key & alignmask)
831 err = skcipher_setkey_unaligned(tfm, key, keylen);
832 else
833 err = cipher->setkey(tfm, key, keylen);
834
835 if (unlikely(err)) {
836 skcipher_set_needkey(tfm);
837 return err;
838 }
839
840 crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
841 return 0;
842}
843
844int crypto_skcipher_encrypt(struct skcipher_request *req)
845{
846 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
847 struct crypto_alg *alg = tfm->base.__crt_alg;
848 unsigned int cryptlen = req->cryptlen;
849 int ret;
850
851 crypto_stats_get(alg);
852 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
853 ret = -ENOKEY;
854 else
855 ret = tfm->encrypt(req);
856 crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
857 return ret;
858}
859EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
860
861int crypto_skcipher_decrypt(struct skcipher_request *req)
862{
863 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
864 struct crypto_alg *alg = tfm->base.__crt_alg;
865 unsigned int cryptlen = req->cryptlen;
866 int ret;
867
868 crypto_stats_get(alg);
869 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
870 ret = -ENOKEY;
871 else
872 ret = tfm->decrypt(req);
873 crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
874 return ret;
875}
876EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
877
878static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
879{
880 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
881 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
882
883 alg->exit(skcipher);
884}
885
886static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
887{
888 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
889 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
890
891 if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
892 return crypto_init_skcipher_ops_blkcipher(tfm);
893
894 if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
895 return crypto_init_skcipher_ops_ablkcipher(tfm);
896
897 skcipher->setkey = skcipher_setkey;
898 skcipher->encrypt = alg->encrypt;
899 skcipher->decrypt = alg->decrypt;
900 skcipher->ivsize = alg->ivsize;
901 skcipher->keysize = alg->max_keysize;
902
903 skcipher_set_needkey(skcipher);
904
905 if (alg->exit)
906 skcipher->base.exit = crypto_skcipher_exit_tfm;
907
908 if (alg->init)
909 return alg->init(skcipher);
910
911 return 0;
912}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}
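
/*
 * Illustrative /proc/crypto entry as printed above (editor's example;
 * the values shown are typical for cbc(aes)):
 *
 *	type         : skcipher
 *	async        : no
 *	blocksize    : 16
 *	min keysize  : 16
 *	max keysize  : 32
 *	ivsize       : 16
 *	chunksize    : 16
 *	walksize     : 16
 */
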
#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
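
/*
 * Example (editor's sketch): allocating and freeing a transform by name.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_skcipher(tfm);
 */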

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
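
/*
 * Example (editor's sketch): a sync transform permits an on-stack
 * request, avoiding a per-operation allocation.
 *
 *	struct crypto_sync_skcipher *tfm;
 *
 *	tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	{
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *		err = crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *	}
 */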

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}
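
/*
 * Editor's note with an example: a stream cipher such as chacha20
 * advertises base.cra_blocksize = 1 but sets chunksize to its internal
 * block size (64 bytes); walksize then defaults to the chunksize unless
 * the driver asks to walk larger spans per step.
 */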

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Roll back the registrations that already succeeded. */
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
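
/*
 * Example (editor's sketch; field values elided): a driver registering
 * several modes in one call from its module init.
 *
 *	static struct skcipher_alg my_algs[] = { {
 *		.base.cra_name	= "ecb(aes)",
 *		...
 *	}, {
 *		.base.cra_name	= "cbc(aes)",
 *		...
 *	} };
 *
 *	static int __init my_driver_init(void)
 *	{
 *		return crypto_register_skciphers(my_algs,
 *						 ARRAY_SIZE(my_algs));
 *	}
 */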

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	int err;

	/* Forward the request flags down, then the result flags back up. */
	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(cipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_spawn(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
 *		    returned here.  It must be dropped with crypto_mod_put().
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
			       struct crypto_alg **cipher_alg_ret)
{
	struct crypto_attr_type *algt;
	struct crypto_alg *cipher_alg;
	struct skcipher_instance *inst;
	struct crypto_spawn *spawn;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return ERR_PTR(-EINVAL);

	mask = CRYPTO_ALG_TYPE_MASK |
		crypto_requires_off(algt->type, algt->mask,
				    CRYPTO_ALG_NEED_FALLBACK);

	cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
	if (IS_ERR(cipher_alg))
		return ERR_CAST(cipher_alg);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst) {
		err = -ENOMEM;
		goto err_put_cipher_alg;
	}
	spawn = skcipher_instance_ctx(inst);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	err = crypto_init_spawn(spawn, cipher_alg,
				skcipher_crypto_instance(inst),
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto err_free_inst;
	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	*cipher_alg_ret = cipher_alg;
	return inst;

err_free_inst:
	kfree(inst);
err_put_cipher_alg:
	crypto_mod_put(cipher_alg);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
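
/*
 * Example (editor's sketch, modelled on a simple mode template's
 * ->create(); the my_mode_* names are illustrative):
 *
 *	static int my_mode_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		struct crypto_alg *alg;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = my_mode_encrypt;
 *		inst->alg.decrypt = my_mode_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		crypto_mod_put(alg);
 *		return err;
 *	}
 */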

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");