/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
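
/* The blkcipher_walk API below chops a scatterlist request into virtually
 * contiguous chunks; see the illustrative sketch above blkcipher_walk_virt()
 * for how a cipher implementation typically drives it.
 */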

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

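/* Map or unmap the page currently under the source/destination scatterlist
 * walk so that it can be accessed through a kernel virtual address.
 */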
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

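/* Slow-path completion: the block was processed in the aligned bounce
 * buffer, so copy the result back out to the destination scatterlist.
 */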
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

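/* Fast-path completion: write the bounce page back to the destination if
 * the data was staged there, drop any kernel mappings taken by
 * blkcipher_next_fast(), and advance both scatterlist walks.
 */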
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

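/* Called by the cipher implementation once it has processed all but @err
 * bytes of the current chunk.  Sets up the next chunk if data remains,
 * otherwise cleans up and returns the final error code.
 */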
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(tfm, walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

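/* Slow path: the next block straddles a page or scatterlist entry, so
 * stage it in an aligned bounce buffer.  The data is copied in here and
 * copied back out in blkcipher_done_slow().
 */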
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

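/* Copy path: source or destination is misaligned, so stage the chunk in
 * the preallocated page and process it in place there.
 */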
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

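/* Fast path: operate directly on the scatterlist pages.  For virtual
 * walks the source page is mapped, and the destination is mapped
 * separately only when it differs from the source.
 */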
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

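/* Set up the next chunk of the walk: verify that at least one block
 * remains, then pick the slow, copy or fast path.  For physical walks the
 * bounce addresses are converted back to page/offset form.
 */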
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

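/* The IV the user gave us is not sufficiently aligned, so bounce it into
 * an aligned buffer.  The buffer is sized so that blkcipher_next_slow()
 * can reuse it for its block staging area.
 */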
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

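/* Start a walk that hands the cipher virtual addresses.  A typical
 * implementation drives the walker roughly like this (an illustrative
 * sketch only; see e.g. crypto/cbc.c for a real user):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		... process walk.nbytes bytes from walk.src.virt.addr
 *		    into walk.dst.virt.addr, leaving `left' bytes ...
 *		err = blkcipher_walk_done(desc, &walk, left);
 *	}
 *	return err;
 */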
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

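/* Common entry point for all walk variants: refuse to run in hard IRQ
 * context, bounce an unaligned IV if necessary, and prime the scatterlist
 * walkers before handing off to blkcipher_walk_next().
 */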
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = blocksize;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

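/* The caller's key buffer does not satisfy the cipher's alignment mask:
 * copy it into an aligned buffer, set the key, then wipe and free the
 * copy.
 */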
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

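/* For a synchronous blkcipher (exact type match requested) the IV lives
 * directly behind the aligned transform context, so reserve room for it;
 * crypto_init_blkcipher_ops_sync() computes the matching address.
 */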
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
	snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
		 alg->cra_blkcipher.geniv ?: "<default>");

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

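/* Look up the cipher that an IV-generator template is to be built on top
 * of and link it into @spawn.
 */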
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

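/* Construct an instance of an IV-generator template (chainiv, eseqiv and
 * friends) around a synchronous or asynchronous block cipher.  Both
 * cipher types are normalised into the local balg view first.
 */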
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");