v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};
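
/*
 * Walk-state flags (semantics inferred from the code below):
 * BLKCIPHER_WALK_PHYS - caller asked for page/offset pairs, nothing is mapped;
 * BLKCIPHER_WALK_SLOW - a partial block is bounced through walk->buffer;
 * BLKCIPHER_WALK_COPY - data is staged through walk->page to satisfy the
 *                       cipher's alignmask;
 * BLKCIPHER_WALK_DIFF - src and dst were mapped separately and both need
 *                       unmapping.
 */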

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
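
/*
 * Worked example (assuming 4 KiB pages): with start at page offset 0xff8
 * and len == 16 the spot would straddle a page boundary, so end_page
 * points at the start of the following page and is returned, placing the
 * whole 16-byte spot inside one page.  If the spot already fits in the
 * current page, end_page <= start and start is returned unchanged.
 */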

static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
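
/*
 * Contract note: the err passed in by the cipher loop is the number of
 * bytes it left unprocessed (normally 0, or a sub-block tail), or a
 * negative errno; as long as walk->total is not yet consumed, the walk
 * continues via blkcipher_walk_next().
 */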

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
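
/*
 * Sizing note (inferred from the layout code above): the allocation
 * leaves room for two aligned block-sized spots, dst followed by src,
 * plus enough slack for blkcipher_get_spot() to shift either one fully
 * into a single page after alignmask rounding.
 */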

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
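
/*
 * diff is zero only when source and destination alias exactly; then one
 * mapping is shared and the cipher runs in place.  Otherwise
 * BLKCIPHER_WALK_DIFF records that dst was mapped separately and must
 * be unmapped on completion.
 */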

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
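
/*
 * Summary of the three per-step outcomes above: the slow path bounces a
 * single block through walk->buffer when less than a full block is
 * contiguous, the copy path stages data through walk->page when the
 * scatterlist is misaligned for the cipher, and the fast path maps the
 * scatterlist memory directly.
 */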

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}
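
/*
 * Called only when the caller's IV buffer violates the alignmask: the
 * IV is copied into an aligned, non-page-straddling spot and walk->iv
 * repointed at it.  The same walk->buffer allocation doubles as scratch
 * space for blkcipher_next_slow() above.
 */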

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
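
/*
 * The four exported walk initializers above differ only in whether the
 * walker hands back virtual or physical addresses and in which transform
 * (blkcipher or aead) supplies the block size, IV size and alignmask
 * before funnelling into blkcipher_walk_first().
 */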

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
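
/*
 * The aligned bounce copy of the key is zeroed before kfree() so key
 * material does not linger in freed heap memory; the caller still owns
 * and must manage the original key buffer.
 */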

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}
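
/*
 * For the synchronous interface the IV lives in the same allocation as
 * the transform context: crypto_blkcipher_ctxsize() above reserved
 * ivsize extra bytes past the aligned context, and crt->iv is pointed
 * at them here.
 */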

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}
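
/*
 * Dispatch convention: a lookup mask covering all of
 * CRYPTO_ALG_TYPE_MASK selects the synchronous blkcipher ops; any
 * narrower mask gets the ablkcipher wrappers built on the same
 * implementation.
 */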

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
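
For context, here is a minimal sketch of the loop a cipher implementation would drive around this walk API, modeled loosely on old in-kernel users such as crypto/ecb.c; the function name and the trivial "copy" cipher are illustrative, not part of this file:

/* Illustrative only: a null cipher pushed through the walk machinery. */
static int example_crypt(struct blkcipher_desc *desc,
			 struct scatterlist *dst, struct scatterlist *src,
			 unsigned int nbytes)
{
	const unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		/* whole blocks available in this contiguous segment */
		unsigned int n = walk.nbytes - (walk.nbytes % bsize);

		/* a real cipher would transform here; memmove stands in */
		memmove(walk.dst.virt.addr, walk.src.virt.addr, n);

		/* pass back how many bytes were left unprocessed */
		err = blkcipher_walk_done(desc, &walk, walk.nbytes - n);
	}
	return err;
}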
v4.17
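
The v4.17 version of the same file follows. Compared with v5.4 above, the visible differences are: the GPL boilerplate paragraph instead of an SPDX tag, blkcipher_done_slow()/blkcipher_done_fast() returning the processed byte count with blkcipher_walk_done() deriving the remaining total from it, and crypto_blkcipher_report() using strncpy() on a non-zeroed struct while reporting alg->cra_blkcipher.geniv.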
 
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
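
/*
 * Bookkeeping differs from the v5.4 rewrite above: here nbytes carries
 * the remaining total and doubles as the "more work" flag passed to
 * scatterwalk_done(); only the WARN_ON(err) slow-path case jumps past
 * those calls via the err label.
 */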

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
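
/*
 * Unlike the v5.4 version above, this one never zeroes rblkcipher before
 * filling it in and uses strncpy() rather than strscpy(), so struct
 * padding can reach userspace uninitialized; the later kernel adds the
 * memset.
 */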
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");