v4.6 — crypto/ablkcipher.c
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

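/*
 * Caller-side usage, sketched under the v4.6-era API.  The completion
 * handler and the key/scatterlist/IV variables (my_done, wait, key, src,
 * dst, nbytes, iv) are hypothetical placeholders, not part of this file.
 * The callback is invoked with -EINPROGRESS when a backlogged request is
 * started, and with the final status when the operation completes:
 *
 *	static void my_done(struct crypto_async_request *areq, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;
 *		complete(areq->data);
 *	}
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_done, &wait);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	ret = crypto_ablkcipher_encrypt(req);
 *	if (ret == -EINPROGRESS || ret == -EBUSY)
 *		wait_for_completion(&wait);
 */
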
#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

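/*
 * Worked example for ablkcipher_get_spot() above, with 4 KiB pages: for
 * start = 0x...0ff8 and len = 16 the last byte would land at 0x...1007,
 * so end_page = 0x...1000 and the spot is bumped to the start of that
 * page.  If [start, start + len) already fits within one page, end_page
 * is at or below start and start is returned unchanged.
 */
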
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

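/*
 * Completion hook for one step of the walk.  A negative err aborts the
 * walk; a non-negative err is the number of bytes the caller did NOT
 * process in this step (normally 0, and required to be 0 on the slow,
 * bounce-buffered path).
 */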
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

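/*
 * Note on the slow path above: the input is copied out of the
 * scatterlist into the bounce buffer here, but the result is only
 * copied back to walk->out later, by ablkcipher_buffer_write() when
 * __ablkcipher_walk_complete() flushes the queued buffers.  The
 * "aligned_bsize * 3" sizing leaves slack for both the alignment fixup
 * and the page-boundary bump performed by ablkcipher_get_spot().
 */
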
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

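/*
 * A driver-side sketch of the physical walk (variable names and the
 * process() step are illustrative; ablkcipher_walk_init() comes from
 * the crypto headers):
 *
 *	struct ablkcipher_walk walk;
 *	unsigned int nbytes, n;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while ((nbytes = walk.nbytes)) {
 *		n = process(walk.src.page, walk.src.offset,
 *			    walk.dst.page, walk.dst.offset, nbytes);
 *		err = ablkcipher_walk_done(req, &walk, nbytes - n);
 *	}
 */
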
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

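/*
 * Example: for an algorithm with an alignmask of 3 (4-byte key
 * alignment), a key passed at an address ending in 0x2 takes the
 * setkey_unaligned() path above, which bounces it through a properly
 * aligned temporary buffer before calling the algorithm's ->setkey(),
 * then zeroes and frees the copy.
 */
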
static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

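/*
 * The show callback above yields /proc/crypto entries of this shape
 * (field values are illustrative, e.g. for a 128..256-bit "cbc(aes)"
 * implementation):
 *
 *	type         : ablkcipher
 *	async        : yes
 *	blocksize    : 16
 *	min keysize  : 16
 *	max keysize  : 32
 *	ivsize       : 16
 *	geniv        : <default>
 */
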
const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

const char *crypto_default_geniv(const struct crypto_alg *alg)
{
	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize) !=
	    alg->cra_blocksize)
		return "chainiv";

	return "eseqiv";
}

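/*
 * Example: "cbc(aes)" has ivsize == blocksize == 16, so it gets the
 * "eseqiv" generator by default; "ctr(aes)" advertises a blocksize of 1
 * with a 16-byte IV, so ivsize != blocksize and "chainiv" is used.
 */
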
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		geniv = alg->cra_blkcipher.geniv;
	else
		geniv = alg->cra_ablkcipher.geniv;

	if (!geniv)
		geniv = crypto_default_geniv(alg);

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	if (tmpl->create) {
		err = tmpl->create(tmpl, tb);
		if (err)
			goto put_tmpl;
		goto ok;
	}

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		tmpl->free(inst);
		goto put_tmpl;
	}

ok:
	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}

struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER)
		return alg;

	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize))
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER) {
		if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					     alg->cra_ablkcipher.ivsize));

	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	alg = crypto_lookup_skcipher(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_skcipher(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_ablkcipher_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
v5.4 — crypto/ablkcipher.c (by this release the givcipher/geniv machinery shown above has been removed)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

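/*
 * err has the same meaning here as in v4.6 (negative to abort, otherwise
 * the number of unprocessed bytes), but the bookkeeping was reworked:
 * the running total is decremented up front, and scatterwalk_done() now
 * receives a boolean "more data follows" instead of a byte count.
 */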
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

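/*
 * Compared with v4.6, the report function below zeroes the structure
 * before filling it and uses strscpy(); the memset() guarantees that no
 * uninitialized stack padding leaks to userspace through the netlink
 * reply.
 */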
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
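
The ablkcipher interface shown above was already a leftover by v5.4 and was
removed entirely in v5.5 in favour of the skcipher API. For comparison, a
minimal allocation sketch under the modern interface, reusing the same
hypothetical variables as the v4.6 example near the top:

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	crypto_skcipher_setkey(tfm, key, 16);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      my_done, &wait);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
	ret = crypto_skcipher_encrypt(req);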