/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

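/*
 * Worked example for blkcipher_get_spot() above (illustrative, assuming
 * 4 KiB pages): with start at page offset 0xff8 and len = 16, the last
 * byte would land in the following page, so end_page points at that
 * page's base address and is returned; the caller then uses [end_page,
 * end_page + len), which lies entirely in one page.  When
 * [start, start + len) already fits in a single page, end_page <= start
 * and start itself is returned unchanged.
 */
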
static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

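/*
 * Illustrative sketch (not part of the original file): this is roughly
 * how a synchronous cipher mode drives the walker.  example_encrypt()
 * and the per-block processing are placeholders, not kernel APIs.
 *
 *	static int example_encrypt(struct blkcipher_desc *desc,
 *				   struct scatterlist *dst,
 *				   struct scatterlist *src,
 *				   unsigned int nbytes)
 *	{
 *		unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while (walk.nbytes) {
 *			Process whole blocks from walk.src.virt.addr into
 *			walk.dst.virt.addr here, then report how many
 *			bytes were left unprocessed:
 *
 *			err = blkcipher_walk_done(desc, &walk,
 *						  walk.nbytes & (bsize - 1));
 *		}
 *		return err;
 *	}
 */
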
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

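/*
 * When source or destination is not sufficiently aligned for the
 * cipher, the chunk is bounced through the whole page at walk->page:
 * blkcipher_next_copy() below copies the input there so the cipher can
 * run in place on aligned memory, and blkcipher_done_fast() later
 * copies the result back out to the real destination.
 */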
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

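/*
 * Fast path: operate directly on the scatterlist pages.  For virtual
 * walks the source mapping is reused for the destination when both
 * refer to the same place; otherwise BLKCIPHER_WALK_DIFF is set and
 * the destination gets its own mapping, undone later in
 * blkcipher_done_fast().
 */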
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

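/*
 * Copy an unaligned IV into a freshly allocated, aligned buffer.  The
 * buffer is sized so that two block-sized spots precede the IV (they
 * can be taken over by blkcipher_next_slow(), which reuses walk->buffer
 * when it is already set) and so that neither the spots nor the IV
 * straddle a page boundary.
 */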
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

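/*
 * Common entry point for all walk flavours: refuses to run in hard IRQ
 * context, bounces an unaligned IV through blkcipher_copy_iv(), and
 * primes the input and output scatterwalks before handing over to
 * blkcipher_walk_next().
 */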
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

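/*
 * Like blkcipher_walk_virt(), but walks in caller-chosen chunks: the
 * walk granularity (walk_blocksize) may differ from the cipher's own
 * block size (cipher_blocksize), which is still used for the minimum
 * length check in blkcipher_walk_next().
 */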
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

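/*
 * The key may be presented at any alignment; if it violates the
 * algorithm's alignment mask it is copied into a suitably aligned
 * temporary buffer, which is zeroed again before being freed.
 */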
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

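/*
 * For synchronous transforms (the type mask selects the plain
 * blkcipher interface) the context is extended with an aligned area
 * that holds the IV; crypto_init_blkcipher_ops_sync() points crt->iv
 * at it.
 */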
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

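/*
 * Synchronous setup: wire up the generic setkey wrapper plus the
 * algorithm's own encrypt/decrypt, and locate crt->iv in the aligned
 * tail of the context reserved by crypto_blkcipher_ctxsize().
 */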
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");