v4.6

/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

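/*
 * Walk state flags:
 *
 * BLKCIPHER_WALK_PHYS: the caller asked for page/offset pairs
 *     (blkcipher_walk_phys()) rather than mapped virtual addresses.
 * BLKCIPHER_WALK_SLOW: the current chunk cannot supply a whole block
 *     contiguously, so it is bounced through an aligned temporary buffer.
 * BLKCIPHER_WALK_COPY: source or destination is not aligned to the
 *     algorithm's alignmask; data goes through a private page and is
 *     copied back when the chunk completes.
 * BLKCIPHER_WALK_DIFF: source and destination do not share a page, so
 *     both mappings are held at once.
 */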
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
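
/*
 * For example, with 4096-byte pages, if start sits 8 bytes before a page
 * boundary and len is 16, the last byte would fall on the next page;
 * end_page then points at that next page and is returned, so the spot
 * lands entirely on one page.  Because the result can skip up to len - 1
 * bytes forward, the caller must reserve nearly twice len.
 */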

static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

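/*
 * blkcipher_walk_done() concludes one step of the walk: @err is the
 * number of bytes of the current chunk left unprocessed (normally 0),
 * or a negative error code.  It flushes or unmaps the chunk, advances
 * the scatterlists, and either sets up the next chunk (yielding to the
 * scheduler first when allowed) or, once the walk is complete, copies
 * a bounced IV back to desc->info and frees the temporary buffers.
 */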
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->walk_blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

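/*
 * The scratch buffer allocated here holds two block-sized bounce areas
 * followed by the aligned IV copy: blkcipher_next_slow() reuses the same
 * buffer for its destination and source blocks, which is why
 * blkcipher_get_spot() is applied three times when carving it up.
 */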
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

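/*
 * When the caller requires the exact (synchronous) blkcipher type, room
 * for the IV is reserved directly behind the transform context;
 * crypto_init_blkcipher_ops_sync() below computes the matching aligned
 * address for crt->iv.
 */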
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
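
A typical consumer of the walker above is a cipher mode's encrypt path,
using blkcipher_walk_init() from include/crypto/algapi.h. The following is
a minimal sketch of the canonical loop; example_encrypt, example_xor and
EXAMPLE_BLOCK_SIZE are illustrative names, not part of this file:

static int example_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	const unsigned int bsize = EXAMPLE_BLOCK_SIZE;	/* hypothetical */
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		/* process as many whole blocks as this chunk holds */
		unsigned int n = walk.nbytes & ~(bsize - 1);

		example_xor(walk.dst.virt.addr, walk.src.virt.addr, n);

		/* hand back the unprocessed remainder of the chunk */
		err = blkcipher_walk_done(desc, &walk, walk.nbytes - n);
	}

	return err;
}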
v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

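/*
 * The report structure is zeroed before use and filled with strscpy(),
 * so no uninitialized padding bytes or unterminated strings can reach
 * userspace through the netlink message below.
 */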
#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
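
The walker API is unchanged in v5.4, so the same consumer loop applies. A
mode that walks in steps other than its own block size uses
blkcipher_walk_virt_block() instead; the sketch below shows a CTR-style
user, where example_ctr_crypt, example_ctr_segment, example_ctr_final and
EXAMPLE_BLOCK_SIZE are illustrative names, not part of this file:

static int example_ctr_crypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst, struct scatterlist *src,
			     unsigned int nbytes)
{
	const unsigned int bsize = EXAMPLE_BLOCK_SIZE;	/* keystream block */
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while (walk.nbytes >= bsize) {
		/* whole blocks; the leftover is carried into the next chunk */
		err = blkcipher_walk_done(desc, &walk,
					  example_ctr_segment(desc, &walk));
	}

	if (walk.nbytes) {
		/* trailing partial block of the stream-cipher-style mode */
		example_ctr_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}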