// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
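
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * cipher mode implementation typically drives the walk API below roughly
 * as follows, in the style of modes such as crypto/ecb.c.  The function
 * name example_crypt and the per-block processing step are placeholders.
 *
 *	static int example_crypt(struct blkcipher_desc *desc,
 *				 struct scatterlist *dst,
 *				 struct scatterlist *src,
 *				 unsigned int nbytes)
 *	{
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while (walk.nbytes) {
 *			unsigned int left = walk.nbytes;
 *
 *			// process whole blocks from walk.src.virt.addr
 *			// into walk.dst.virt.addr, decrementing left
 *
 *			err = blkcipher_walk_done(desc, &walk, left);
 *		}
 *
 *		return err;
 *	}
 */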

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
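
/*
 * Worked example for blkcipher_get_spot() (illustrative, assuming
 * PAGE_SIZE == 4096): with start == 0x1ff0 and len == 32 the last byte
 * would land at 0x200f, so the spot is moved up to the page boundary at
 * 0x2000; with start == 0x1f00 and len == 32 the range fits within the
 * page and start is returned unchanged.
 */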

static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

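/*
 * err is the number of bytes of the current chunk that the caller left
 * unprocessed (0 when everything was consumed), or a negative errno to
 * abort the walk.
 */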
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

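/*
 * The scratch buffer allocated here leaves room for two block-sized,
 * suitably aligned spots (so it can be reused as walk->buffer by the
 * slow path) followed by a copy of the IV placed so that it does not
 * straddle a page boundary.
 */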
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

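/*
 * Keys that do not satisfy the algorithm's alignment mask are bounced
 * through a properly aligned temporary buffer, which is zeroed before
 * being freed.
 */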
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

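/*
 * A request mask that pins down the exact algorithm type selects the
 * synchronous blkcipher interface below; any looser mask falls through
 * to the asynchronous ablkcipher wrapper.
 */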
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type : blkcipher\n");
	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv : <default>\n");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(tfm, walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = blocksize;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type : blkcipher\n");
	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?:
				      "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

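/*
 * Look up the IV-generating (geniv) form of the named cipher and bind
 * it to the spawn.
 */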
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

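/*
 * Build a template instance that wraps the named (a)blkcipher with an
 * IV generator: the underlying algorithm's key sizes, IV size and entry
 * points are carried over into the new givcipher instance.
 */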
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");