crypto/skcipher.c (v4.6)
 
  1/*
  2 * Symmetric key cipher operations.
  3 *
  4 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
  5 * multiple page boundaries by using temporary blocks.  In user context,
  6 * the kernel is given a chance to schedule us once per page.
  7 *
  8 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  9 *
 10 * This program is free software; you can redistribute it and/or modify it
 11 * under the terms of the GNU General Public License as published by the Free
 12 * Software Foundation; either version 2 of the License, or (at your option)
 13 * any later version.
 14 *
 15 */
 16
 17#include <crypto/internal/skcipher.h>
 18#include <linux/bug.h>
 19#include <linux/module.h>
 20
 21#include "internal.h"
 22
 23static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
 24{
 25	if (alg->cra_type == &crypto_blkcipher_type)
 26		return sizeof(struct crypto_blkcipher *);
 27
 28	BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
 29	       alg->cra_type != &crypto_givcipher_type);
 30
 31	return sizeof(struct crypto_ablkcipher *);
 32}
 33
 34static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
 35				     const u8 *key, unsigned int keylen)
 36{
 37	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
 38	struct crypto_blkcipher *blkcipher = *ctx;
 39	int err;
 40
 41	crypto_blkcipher_clear_flags(blkcipher, ~0);
 42	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
 43					      CRYPTO_TFM_REQ_MASK);
 44	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
 45	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
 46				       CRYPTO_TFM_RES_MASK);
 47
 48	return err;
 49}
 50
 51static int skcipher_crypt_blkcipher(struct skcipher_request *req,
 52				    int (*crypt)(struct blkcipher_desc *,
 53						 struct scatterlist *,
 54						 struct scatterlist *,
 55						 unsigned int))
 56{
 57	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 58	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
 59	struct blkcipher_desc desc = {
 60		.tfm = *ctx,
 61		.info = req->iv,
 62		.flags = req->base.flags,
 63	};
 64
 65
 66	return crypt(&desc, req->dst, req->src, req->cryptlen);
 67}
 68
 69static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
 70{
 71	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 72	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 73	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
 74
 75	return skcipher_crypt_blkcipher(req, alg->encrypt);
 76}
 77
 78static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
 79{
 80	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 81	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 82	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
 83
 84	return skcipher_crypt_blkcipher(req, alg->decrypt);
 85}
 86
 87static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
 88{
 89	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
 90
 91	crypto_free_blkcipher(*ctx);
 92}
 93
 94static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
 95{
 96	struct crypto_alg *calg = tfm->__crt_alg;
 97	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
 98	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
 99	struct crypto_blkcipher *blkcipher;
100	struct crypto_tfm *btfm;
101
102	if (!crypto_mod_get(calg))
103		return -EAGAIN;
104
105	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
106					CRYPTO_ALG_TYPE_MASK);
107	if (IS_ERR(btfm)) {
108		crypto_mod_put(calg);
109		return PTR_ERR(btfm);
110	}
111
112	blkcipher = __crypto_blkcipher_cast(btfm);
113	*ctx = blkcipher;
114	tfm->exit = crypto_exit_skcipher_ops_blkcipher;
115
116	skcipher->setkey = skcipher_setkey_blkcipher;
117	skcipher->encrypt = skcipher_encrypt_blkcipher;
118	skcipher->decrypt = skcipher_decrypt_blkcipher;
119
120	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
121	skcipher->keysize = calg->cra_blkcipher.max_keysize;
122
123	return 0;
124}
125
126static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
127				      const u8 *key, unsigned int keylen)
128{
129	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
130	struct crypto_ablkcipher *ablkcipher = *ctx;
131	int err;
132
133	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
134	crypto_ablkcipher_set_flags(ablkcipher,
135				    crypto_skcipher_get_flags(tfm) &
136				    CRYPTO_TFM_REQ_MASK);
137	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
138	crypto_skcipher_set_flags(tfm,
139				  crypto_ablkcipher_get_flags(ablkcipher) &
140				  CRYPTO_TFM_RES_MASK);
141
142	return err;
143}
144
145static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
146				     int (*crypt)(struct ablkcipher_request *))
147{
148	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
149	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
150	struct ablkcipher_request *subreq = skcipher_request_ctx(req);
151
152	ablkcipher_request_set_tfm(subreq, *ctx);
153	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
154					req->base.complete, req->base.data);
155	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
156				     req->iv);
157
158	return crypt(subreq);
159}
160
161static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
162{
163	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
164	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
165	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
166
167	return skcipher_crypt_ablkcipher(req, alg->encrypt);
168}
169
170static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
171{
172	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
173	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
174	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
175
176	return skcipher_crypt_ablkcipher(req, alg->decrypt);
177}
178
179static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
180{
181	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
182
183	crypto_free_ablkcipher(*ctx);
184}
185
186static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
187{
188	struct crypto_alg *calg = tfm->__crt_alg;
189	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
190	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
191	struct crypto_ablkcipher *ablkcipher;
192	struct crypto_tfm *abtfm;
193
194	if (!crypto_mod_get(calg))
195		return -EAGAIN;
196
197	abtfm = __crypto_alloc_tfm(calg, 0, 0);
198	if (IS_ERR(abtfm)) {
199		crypto_mod_put(calg);
200		return PTR_ERR(abtfm);
201	}
202
203	ablkcipher = __crypto_ablkcipher_cast(abtfm);
204	*ctx = ablkcipher;
205	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
206
207	skcipher->setkey = skcipher_setkey_ablkcipher;
208	skcipher->encrypt = skcipher_encrypt_ablkcipher;
209	skcipher->decrypt = skcipher_decrypt_ablkcipher;
210
211	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
212	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
213			    sizeof(struct ablkcipher_request);
214	skcipher->keysize = calg->cra_ablkcipher.max_keysize;
215
216	return 0;
217}
218
219static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
220{
221	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
222		return crypto_init_skcipher_ops_blkcipher(tfm);
223
224	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
225	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);
226
227	return crypto_init_skcipher_ops_ablkcipher(tfm);
228}
229
230static const struct crypto_type crypto_skcipher_type2 = {
231	.extsize = crypto_skcipher_extsize,
232	.init_tfm = crypto_skcipher_init_tfm,
233	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
234	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
235	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
236	.tfmsize = offsetof(struct crypto_skcipher, base),
237};
238
239struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
240					      u32 type, u32 mask)
241{
242	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
243}
244EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
245
246MODULE_LICENSE("GPL");
247MODULE_DESCRIPTION("Symmetric key cipher type");
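
For reference, here is a minimal sketch (not part of this file) of how a kernel-side caller might use the skcipher interface exported above: allocate a transform with crypto_alloc_skcipher(), load a key, and run a request through crypto_skcipher_encrypt(). Masking out CRYPTO_ALG_ASYNC requests a synchronous implementation, which keeps the sketch version-neutral; the algorithm name "cbc(aes)", the 16-byte key length, and the example_encrypt() wrapper are illustrative assumptions.

/*
 * Illustrative only: one-shot in-place encryption of a single linear
 * kernel buffer (not on the stack) with a synchronous skcipher.
 * Error handling is abbreviated.
 */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_encrypt(u8 *buf, unsigned int len,
			   const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	/* Mask out async implementations so ->encrypt() completes inline. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
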
crypto/skcipher.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Symmetric key cipher operations.
  4 *
  5 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
  6 * multiple page boundaries by using temporary blocks.  In user context,
  7 * the kernel is given a chance to schedule us once per page.
  8 *
  9 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 10 */
 11
 12#include <crypto/internal/aead.h>
 13#include <crypto/internal/cipher.h>
 14#include <crypto/internal/skcipher.h>
 15#include <crypto/scatterwalk.h>
 16#include <linux/bug.h>
 17#include <linux/cryptouser.h>
 18#include <linux/compiler.h>
 19#include <linux/list.h>
 20#include <linux/module.h>
 21#include <linux/rtnetlink.h>
 22#include <linux/seq_file.h>
 23#include <net/netlink.h>
 24
 25#include "internal.h"
 26
 27enum {
 28	SKCIPHER_WALK_PHYS = 1 << 0,
 29	SKCIPHER_WALK_SLOW = 1 << 1,
 30	SKCIPHER_WALK_COPY = 1 << 2,
 31	SKCIPHER_WALK_DIFF = 1 << 3,
 32	SKCIPHER_WALK_SLEEP = 1 << 4,
 33};
 34
 35struct skcipher_walk_buffer {
 36	struct list_head entry;
 37	struct scatter_walk dst;
 38	unsigned int len;
 39	u8 *data;
 40	u8 buffer[];
 41};
 42
 43static int skcipher_walk_next(struct skcipher_walk *walk);
 44
 45static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
 46{
 47	if (PageHighMem(scatterwalk_page(walk)))
 48		kunmap_atomic(vaddr);
 49}
 50
 51static inline void *skcipher_map(struct scatter_walk *walk)
 52{
 53	struct page *page = scatterwalk_page(walk);
 54
 55	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
 56	       offset_in_page(walk->offset);
 57}
 58
 59static inline void skcipher_map_src(struct skcipher_walk *walk)
 60{
 61	walk->src.virt.addr = skcipher_map(&walk->in);
 62}
 63
 64static inline void skcipher_map_dst(struct skcipher_walk *walk)
 65{
 66	walk->dst.virt.addr = skcipher_map(&walk->out);
 67}
 68
 69static inline void skcipher_unmap_src(struct skcipher_walk *walk)
 70{
 71	skcipher_unmap(&walk->in, walk->src.virt.addr);
 72}
 73
 74static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
 75{
 76	skcipher_unmap(&walk->out, walk->dst.virt.addr);
 77}
 78
 79static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
 80{
 81	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
 82}
 83
 84/* Get a spot of the specified length that does not straddle a page.
 85 * The caller needs to ensure that there is enough space for this operation.
 86 */
 87static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
 88{
 89	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
 90
 91	return max(start, end_page);
 92}
 93
 94static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 95{
 96	u8 *addr;
 97
 98	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
 99	addr = skcipher_get_spot(addr, bsize);
100	scatterwalk_copychunks(addr, &walk->out, bsize,
101			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
102	return 0;
103}
104
105int skcipher_walk_done(struct skcipher_walk *walk, int err)
106{
107	unsigned int n = walk->nbytes;
108	unsigned int nbytes = 0;
109
110	if (!n)
111		goto finish;
112
113	if (likely(err >= 0)) {
114		n -= err;
115		nbytes = walk->total - n;
116	}
117
118	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
119				    SKCIPHER_WALK_SLOW |
120				    SKCIPHER_WALK_COPY |
121				    SKCIPHER_WALK_DIFF)))) {
122unmap_src:
123		skcipher_unmap_src(walk);
124	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
125		skcipher_unmap_dst(walk);
126		goto unmap_src;
127	} else if (walk->flags & SKCIPHER_WALK_COPY) {
128		skcipher_map_dst(walk);
129		memcpy(walk->dst.virt.addr, walk->page, n);
130		skcipher_unmap_dst(walk);
131	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
132		if (err > 0) {
133			/*
134			 * Didn't process all bytes.  Either the algorithm is
135			 * broken, or this was the last step and it turned out
136			 * the message wasn't evenly divisible into blocks but
137			 * the algorithm requires it.
138			 */
139			err = -EINVAL;
140			nbytes = 0;
141		} else
142			n = skcipher_done_slow(walk, n);
143	}
144
145	if (err > 0)
146		err = 0;
147
148	walk->total = nbytes;
149	walk->nbytes = 0;
150
151	scatterwalk_advance(&walk->in, n);
152	scatterwalk_advance(&walk->out, n);
153	scatterwalk_done(&walk->in, 0, nbytes);
154	scatterwalk_done(&walk->out, 1, nbytes);
155
156	if (nbytes) {
157		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
158			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
159		return skcipher_walk_next(walk);
160	}
161
162finish:
163	/* Short-circuit for the common/fast path. */
164	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
165		goto out;
166
167	if (walk->flags & SKCIPHER_WALK_PHYS)
168		goto out;
169
170	if (walk->iv != walk->oiv)
171		memcpy(walk->oiv, walk->iv, walk->ivsize);
172	if (walk->buffer != walk->page)
173		kfree(walk->buffer);
174	if (walk->page)
175		free_page((unsigned long)walk->page);
176
177out:
178	return err;
179}
180EXPORT_SYMBOL_GPL(skcipher_walk_done);
181
182void skcipher_walk_complete(struct skcipher_walk *walk, int err)
183{
184	struct skcipher_walk_buffer *p, *tmp;
185
186	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
187		u8 *data;
188
189		if (err)
190			goto done;
191
192		data = p->data;
193		if (!data) {
194			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
195			data = skcipher_get_spot(data, walk->stride);
196		}
197
198		scatterwalk_copychunks(data, &p->dst, p->len, 1);
199
200		if (offset_in_page(p->data) + p->len + walk->stride >
201		    PAGE_SIZE)
202			free_page((unsigned long)p->data);
203
204done:
205		list_del(&p->entry);
206		kfree(p);
207	}
208
209	if (!err && walk->iv != walk->oiv)
210		memcpy(walk->oiv, walk->iv, walk->ivsize);
211	if (walk->buffer != walk->page)
212		kfree(walk->buffer);
213	if (walk->page)
214		free_page((unsigned long)walk->page);
215}
216EXPORT_SYMBOL_GPL(skcipher_walk_complete);
217
218static void skcipher_queue_write(struct skcipher_walk *walk,
219				 struct skcipher_walk_buffer *p)
220{
221	p->dst = walk->out;
222	list_add_tail(&p->entry, &walk->buffers);
223}
224
225static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
226{
227	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
228	unsigned alignmask = walk->alignmask;
229	struct skcipher_walk_buffer *p;
230	unsigned a;
231	unsigned n;
232	u8 *buffer;
233	void *v;
234
235	if (!phys) {
236		if (!walk->buffer)
237			walk->buffer = walk->page;
238		buffer = walk->buffer;
239		if (buffer)
240			goto ok;
241	}
242
243	/* Start with the minimum alignment of kmalloc. */
244	a = crypto_tfm_ctx_alignment() - 1;
245	n = bsize;
246
247	if (phys) {
248		/* Calculate the minimum alignment of p->buffer. */
249		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
250		n += sizeof(*p);
251	}
252
253	/* Minimum size to align p->buffer by alignmask. */
254	n += alignmask & ~a;
255
256	/* Minimum size to ensure p->buffer does not straddle a page. */
257	n += (bsize - 1) & ~(alignmask | a);
258
259	v = kzalloc(n, skcipher_walk_gfp(walk));
260	if (!v)
261		return skcipher_walk_done(walk, -ENOMEM);
262
263	if (phys) {
264		p = v;
265		p->len = bsize;
266		skcipher_queue_write(walk, p);
267		buffer = p->buffer;
268	} else {
269		walk->buffer = v;
270		buffer = v;
271	}
272
273ok:
274	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
275	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
276	walk->src.virt.addr = walk->dst.virt.addr;
277
278	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
279
280	walk->nbytes = bsize;
281	walk->flags |= SKCIPHER_WALK_SLOW;
282
283	return 0;
284}
285
286static int skcipher_next_copy(struct skcipher_walk *walk)
287{
288	struct skcipher_walk_buffer *p;
289	u8 *tmp = walk->page;
290
291	skcipher_map_src(walk);
292	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
293	skcipher_unmap_src(walk);
294
295	walk->src.virt.addr = tmp;
296	walk->dst.virt.addr = tmp;
297
298	if (!(walk->flags & SKCIPHER_WALK_PHYS))
299		return 0;
300
301	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
302	if (!p)
303		return -ENOMEM;
304
305	p->data = walk->page;
306	p->len = walk->nbytes;
307	skcipher_queue_write(walk, p);
308
309	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
310	    PAGE_SIZE)
311		walk->page = NULL;
312	else
313		walk->page += walk->nbytes;
314
315	return 0;
316}
317
318static int skcipher_next_fast(struct skcipher_walk *walk)
319{
320	unsigned long diff;
321
322	walk->src.phys.page = scatterwalk_page(&walk->in);
323	walk->src.phys.offset = offset_in_page(walk->in.offset);
324	walk->dst.phys.page = scatterwalk_page(&walk->out);
325	walk->dst.phys.offset = offset_in_page(walk->out.offset);
326
327	if (walk->flags & SKCIPHER_WALK_PHYS)
328		return 0;
329
330	diff = walk->src.phys.offset - walk->dst.phys.offset;
331	diff |= walk->src.virt.page - walk->dst.virt.page;
332
333	skcipher_map_src(walk);
334	walk->dst.virt.addr = walk->src.virt.addr;
335
336	if (diff) {
337		walk->flags |= SKCIPHER_WALK_DIFF;
338		skcipher_map_dst(walk);
339	}
340
341	return 0;
342}
343
344static int skcipher_walk_next(struct skcipher_walk *walk)
345{
346	unsigned int bsize;
347	unsigned int n;
348	int err;
349
350	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
351			 SKCIPHER_WALK_DIFF);
352
353	n = walk->total;
354	bsize = min(walk->stride, max(n, walk->blocksize));
355	n = scatterwalk_clamp(&walk->in, n);
356	n = scatterwalk_clamp(&walk->out, n);
357
358	if (unlikely(n < bsize)) {
359		if (unlikely(walk->total < walk->blocksize))
360			return skcipher_walk_done(walk, -EINVAL);
361
362slow_path:
363		err = skcipher_next_slow(walk, bsize);
364		goto set_phys_lowmem;
365	}
366
367	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
368		if (!walk->page) {
369			gfp_t gfp = skcipher_walk_gfp(walk);
370
371			walk->page = (void *)__get_free_page(gfp);
372			if (!walk->page)
373				goto slow_path;
374		}
375
376		walk->nbytes = min_t(unsigned, n,
377				     PAGE_SIZE - offset_in_page(walk->page));
378		walk->flags |= SKCIPHER_WALK_COPY;
379		err = skcipher_next_copy(walk);
380		goto set_phys_lowmem;
381	}
382
383	walk->nbytes = n;
384
385	return skcipher_next_fast(walk);
386
387set_phys_lowmem:
388	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
389		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
390		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
391		walk->src.phys.offset &= PAGE_SIZE - 1;
392		walk->dst.phys.offset &= PAGE_SIZE - 1;
393	}
394	return err;
395}
396
397static int skcipher_copy_iv(struct skcipher_walk *walk)
398{
399	unsigned a = crypto_tfm_ctx_alignment() - 1;
400	unsigned alignmask = walk->alignmask;
401	unsigned ivsize = walk->ivsize;
402	unsigned bs = walk->stride;
403	unsigned aligned_bs;
404	unsigned size;
405	u8 *iv;
406
407	aligned_bs = ALIGN(bs, alignmask + 1);
408
409	/* Minimum size to align buffer by alignmask. */
410	size = alignmask & ~a;
411
412	if (walk->flags & SKCIPHER_WALK_PHYS)
413		size += ivsize;
414	else {
415		size += aligned_bs + ivsize;
416
417		/* Minimum size to ensure buffer does not straddle a page. */
418		size += (bs - 1) & ~(alignmask | a);
419	}
420
421	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
422	if (!walk->buffer)
423		return -ENOMEM;
424
425	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
426	iv = skcipher_get_spot(iv, bs) + aligned_bs;
427
428	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
429	return 0;
430}
431
432static int skcipher_walk_first(struct skcipher_walk *walk)
433{
434	if (WARN_ON_ONCE(in_irq()))
435		return -EDEADLK;
436
437	walk->buffer = NULL;
438	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
439		int err = skcipher_copy_iv(walk);
440		if (err)
441			return err;
442	}
443
444	walk->page = NULL;
445
446	return skcipher_walk_next(walk);
447}
448
449static int skcipher_walk_skcipher(struct skcipher_walk *walk,
450				  struct skcipher_request *req)
451{
452	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
453
454	walk->total = req->cryptlen;
455	walk->nbytes = 0;
456	walk->iv = req->iv;
457	walk->oiv = req->iv;
458
459	if (unlikely(!walk->total))
460		return 0;
461
462	scatterwalk_start(&walk->in, req->src);
463	scatterwalk_start(&walk->out, req->dst);
464
465	walk->flags &= ~SKCIPHER_WALK_SLEEP;
466	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
467		       SKCIPHER_WALK_SLEEP : 0;
468
469	walk->blocksize = crypto_skcipher_blocksize(tfm);
470	walk->stride = crypto_skcipher_walksize(tfm);
471	walk->ivsize = crypto_skcipher_ivsize(tfm);
472	walk->alignmask = crypto_skcipher_alignmask(tfm);
473
474	return skcipher_walk_first(walk);
475}
476
477int skcipher_walk_virt(struct skcipher_walk *walk,
478		       struct skcipher_request *req, bool atomic)
479{
480	int err;
481
482	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
483
484	walk->flags &= ~SKCIPHER_WALK_PHYS;
485
486	err = skcipher_walk_skcipher(walk, req);
487
488	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
489
490	return err;
491}
492EXPORT_SYMBOL_GPL(skcipher_walk_virt);
493
494int skcipher_walk_async(struct skcipher_walk *walk,
495			struct skcipher_request *req)
496{
497	walk->flags |= SKCIPHER_WALK_PHYS;
498
499	INIT_LIST_HEAD(&walk->buffers);
500
501	return skcipher_walk_skcipher(walk, req);
502}
503EXPORT_SYMBOL_GPL(skcipher_walk_async);
504
505static int skcipher_walk_aead_common(struct skcipher_walk *walk,
506				     struct aead_request *req, bool atomic)
507{
508	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
509	int err;
510
511	walk->nbytes = 0;
512	walk->iv = req->iv;
513	walk->oiv = req->iv;
514
515	if (unlikely(!walk->total))
516		return 0;
517
518	walk->flags &= ~SKCIPHER_WALK_PHYS;
519
520	scatterwalk_start(&walk->in, req->src);
521	scatterwalk_start(&walk->out, req->dst);
522
523	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
524	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
525
526	scatterwalk_done(&walk->in, 0, walk->total);
527	scatterwalk_done(&walk->out, 0, walk->total);
528
529	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
530		walk->flags |= SKCIPHER_WALK_SLEEP;
531	else
532		walk->flags &= ~SKCIPHER_WALK_SLEEP;
533
534	walk->blocksize = crypto_aead_blocksize(tfm);
535	walk->stride = crypto_aead_chunksize(tfm);
536	walk->ivsize = crypto_aead_ivsize(tfm);
537	walk->alignmask = crypto_aead_alignmask(tfm);
538
539	err = skcipher_walk_first(walk);
540
541	if (atomic)
542		walk->flags &= ~SKCIPHER_WALK_SLEEP;
543
544	return err;
545}
546
547int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
548			       struct aead_request *req, bool atomic)
549{
550	walk->total = req->cryptlen;
551
552	return skcipher_walk_aead_common(walk, req, atomic);
553}
554EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
555
556int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
557			       struct aead_request *req, bool atomic)
558{
559	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
560
561	walk->total = req->cryptlen - crypto_aead_authsize(tfm);
562
563	return skcipher_walk_aead_common(walk, req, atomic);
564}
565EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
566
567static void skcipher_set_needkey(struct crypto_skcipher *tfm)
568{
569	if (crypto_skcipher_max_keysize(tfm) != 0)
570		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
571}
572
573static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
574				     const u8 *key, unsigned int keylen)
575{
576	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
577	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
578	u8 *buffer, *alignbuffer;
579	unsigned long absize;
580	int ret;
581
582	absize = keylen + alignmask;
583	buffer = kmalloc(absize, GFP_ATOMIC);
584	if (!buffer)
585		return -ENOMEM;
586
587	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
588	memcpy(alignbuffer, key, keylen);
589	ret = cipher->setkey(tfm, alignbuffer, keylen);
590	kfree_sensitive(buffer);
591	return ret;
592}
593
594int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
595			   unsigned int keylen)
596{
597	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
598	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
599	int err;
600
601	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
602		return -EINVAL;
603
604	if ((unsigned long)key & alignmask)
605		err = skcipher_setkey_unaligned(tfm, key, keylen);
606	else
607		err = cipher->setkey(tfm, key, keylen);
608
609	if (unlikely(err)) {
610		skcipher_set_needkey(tfm);
611		return err;
612	}
613
614	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
615	return 0;
616}
617EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
618
619int crypto_skcipher_encrypt(struct skcipher_request *req)
620{
621	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
622	struct crypto_alg *alg = tfm->base.__crt_alg;
623	unsigned int cryptlen = req->cryptlen;
624	int ret;
625
626	crypto_stats_get(alg);
627	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
628		ret = -ENOKEY;
629	else
630		ret = crypto_skcipher_alg(tfm)->encrypt(req);
631	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
632	return ret;
633}
634EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
635
636int crypto_skcipher_decrypt(struct skcipher_request *req)
637{
638	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
639	struct crypto_alg *alg = tfm->base.__crt_alg;
640	unsigned int cryptlen = req->cryptlen;
641	int ret;
642
643	crypto_stats_get(alg);
644	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
645		ret = -ENOKEY;
646	else
647		ret = crypto_skcipher_alg(tfm)->decrypt(req);
648	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
649	return ret;
650}
651EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
652
653static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
654{
655	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
656	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
657
658	alg->exit(skcipher);
659}
660
661static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
662{
663	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
664	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
665
666	skcipher_set_needkey(skcipher);
667
668	if (alg->exit)
669		skcipher->base.exit = crypto_skcipher_exit_tfm;
670
671	if (alg->init)
672		return alg->init(skcipher);
673
674	return 0;
675}
676
677static void crypto_skcipher_free_instance(struct crypto_instance *inst)
678{
679	struct skcipher_instance *skcipher =
680		container_of(inst, struct skcipher_instance, s.base);
681
682	skcipher->free(skcipher);
683}
684
685static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
686	__maybe_unused;
687static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
688{
689	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
690						     base);
691
692	seq_printf(m, "type         : skcipher\n");
693	seq_printf(m, "async        : %s\n",
694		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
695	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
696	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
697	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
698	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
699	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
700	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
701}
702
703#ifdef CONFIG_NET
704static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
705{
706	struct crypto_report_blkcipher rblkcipher;
707	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
708						     base);
709
710	memset(&rblkcipher, 0, sizeof(rblkcipher));
711
712	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
713	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
714
715	rblkcipher.blocksize = alg->cra_blocksize;
716	rblkcipher.min_keysize = skcipher->min_keysize;
717	rblkcipher.max_keysize = skcipher->max_keysize;
718	rblkcipher.ivsize = skcipher->ivsize;
719
720	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
721		       sizeof(rblkcipher), &rblkcipher);
722}
723#else
724static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
725{
726	return -ENOSYS;
727}
728#endif
729
730static const struct crypto_type crypto_skcipher_type = {
731	.extsize = crypto_alg_extsize,
732	.init_tfm = crypto_skcipher_init_tfm,
733	.free = crypto_skcipher_free_instance,
734#ifdef CONFIG_PROC_FS
735	.show = crypto_skcipher_show,
736#endif
737	.report = crypto_skcipher_report,
738	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
739	.maskset = CRYPTO_ALG_TYPE_MASK,
740	.type = CRYPTO_ALG_TYPE_SKCIPHER,
741	.tfmsize = offsetof(struct crypto_skcipher, base),
742};
743
744int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
745			 struct crypto_instance *inst,
746			 const char *name, u32 type, u32 mask)
747{
748	spawn->base.frontend = &crypto_skcipher_type;
749	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
750}
751EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
752
753struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
754					      u32 type, u32 mask)
755{
756	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
757}
758EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
759
760struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
761				const char *alg_name, u32 type, u32 mask)
762{
763	struct crypto_skcipher *tfm;
764
765	/* Only sync algorithms allowed. */
766	mask |= CRYPTO_ALG_ASYNC;
767
768	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
769
770	/*
771	 * Make sure we do not allocate something that might get used with
772	 * an on-stack request: check the request size.
773	 */
774	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
775				    MAX_SYNC_SKCIPHER_REQSIZE)) {
776		crypto_free_skcipher(tfm);
777		return ERR_PTR(-EINVAL);
778	}
779
780	return (struct crypto_sync_skcipher *)tfm;
781}
782EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
783
784int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
785{
786	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
787}
788EXPORT_SYMBOL_GPL(crypto_has_skcipher);
789
790static int skcipher_prepare_alg(struct skcipher_alg *alg)
791{
792	struct crypto_alg *base = &alg->base;
793
794	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
795	    alg->walksize > PAGE_SIZE / 8)
796		return -EINVAL;
797
798	if (!alg->chunksize)
799		alg->chunksize = base->cra_blocksize;
800	if (!alg->walksize)
801		alg->walksize = alg->chunksize;
802
803	base->cra_type = &crypto_skcipher_type;
804	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
805	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
806
807	return 0;
808}
809
810int crypto_register_skcipher(struct skcipher_alg *alg)
811{
812	struct crypto_alg *base = &alg->base;
813	int err;
814
815	err = skcipher_prepare_alg(alg);
816	if (err)
817		return err;
818
819	return crypto_register_alg(base);
820}
821EXPORT_SYMBOL_GPL(crypto_register_skcipher);
822
823void crypto_unregister_skcipher(struct skcipher_alg *alg)
824{
825	crypto_unregister_alg(&alg->base);
826}
827EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
828
829int crypto_register_skciphers(struct skcipher_alg *algs, int count)
830{
831	int i, ret;
832
833	for (i = 0; i < count; i++) {
834		ret = crypto_register_skcipher(&algs[i]);
835		if (ret)
836			goto err;
837	}
838
839	return 0;
840
841err:
842	for (--i; i >= 0; --i)
843		crypto_unregister_skcipher(&algs[i]);
844
845	return ret;
846}
847EXPORT_SYMBOL_GPL(crypto_register_skciphers);
848
849void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
850{
851	int i;
852
853	for (i = count - 1; i >= 0; --i)
854		crypto_unregister_skcipher(&algs[i]);
855}
856EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
857
858int skcipher_register_instance(struct crypto_template *tmpl,
859			   struct skcipher_instance *inst)
860{
861	int err;
862
863	if (WARN_ON(!inst->free))
864		return -EINVAL;
865
866	err = skcipher_prepare_alg(&inst->alg);
867	if (err)
868		return err;
869
870	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
871}
872EXPORT_SYMBOL_GPL(skcipher_register_instance);
873
874static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
875				  unsigned int keylen)
876{
877	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
878
879	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
880	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
881				CRYPTO_TFM_REQ_MASK);
882	return crypto_cipher_setkey(cipher, key, keylen);
883}
884
885static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
886{
887	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
888	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
889	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
890	struct crypto_cipher *cipher;
891
892	cipher = crypto_spawn_cipher(spawn);
893	if (IS_ERR(cipher))
894		return PTR_ERR(cipher);
895
896	ctx->cipher = cipher;
897	return 0;
898}
899
900static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
901{
902	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
903
904	crypto_free_cipher(ctx->cipher);
905}
906
907static void skcipher_free_instance_simple(struct skcipher_instance *inst)
908{
909	crypto_drop_cipher(skcipher_instance_ctx(inst));
910	kfree(inst);
911}
912
913/**
914 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
915 *
916 * Allocate an skcipher_instance for a simple block cipher mode of operation,
917 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
918 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
919 * alignmask, and priority are set from the underlying cipher but can be
920 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
921 * default ->setkey(), ->init(), and ->exit() methods are installed.
922 *
923 * @tmpl: the template being instantiated
924 * @tb: the template parameters
925 *
926 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
927 *	   needs to register the instance.
928 */
929struct skcipher_instance *skcipher_alloc_instance_simple(
930	struct crypto_template *tmpl, struct rtattr **tb)
931{
932	u32 mask;
933	struct skcipher_instance *inst;
934	struct crypto_cipher_spawn *spawn;
935	struct crypto_alg *cipher_alg;
936	int err;
937
938	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
939	if (err)
940		return ERR_PTR(err);
941
942	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
943	if (!inst)
944		return ERR_PTR(-ENOMEM);
945	spawn = skcipher_instance_ctx(inst);
946
947	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
948				 crypto_attr_alg_name(tb[1]), 0, mask);
949	if (err)
950		goto err_free_inst;
951	cipher_alg = crypto_spawn_cipher_alg(spawn);
952
953	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
954				  cipher_alg);
955	if (err)
956		goto err_free_inst;
957
958	inst->free = skcipher_free_instance_simple;
959
960	/* Default algorithm properties, can be overridden */
961	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
962	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
963	inst->alg.base.cra_priority = cipher_alg->cra_priority;
964	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
965	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
966	inst->alg.ivsize = cipher_alg->cra_blocksize;
967
968	/* Use skcipher_ctx_simple by default, can be overridden */
969	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
970	inst->alg.setkey = skcipher_setkey_simple;
971	inst->alg.init = skcipher_init_tfm_simple;
972	inst->alg.exit = skcipher_exit_tfm_simple;
973
974	return inst;
975
976err_free_inst:
977	skcipher_free_instance_simple(inst);
978	return ERR_PTR(err);
979}
980EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
981
982MODULE_LICENSE("GPL");
983MODULE_DESCRIPTION("Symmetric key cipher type");
984MODULE_IMPORT_NS(CRYPTO_INTERNAL);
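
As a complement to the walk machinery above, here is a hedged sketch of the loop an skcipher implementation's ->encrypt() handler typically runs: map a chunk with skcipher_walk_virt(), process walk.nbytes bytes, and report the unprocessed remainder to skcipher_walk_done(). The XOR "cipher", the example_ctx layout, and the function names are placeholders; only the walk calls follow the interface defined in this file.

/*
 * Illustrative only: a trivial byte-wise XOR used to show the shape of
 * the skcipher_walk loop.  Real algorithms process walk.nbytes rounded
 * down to their block size and pass the leftover count to
 * skcipher_walk_done().
 */
#include <crypto/internal/skcipher.h>

struct example_ctx {
	u8 pad;				/* placeholder single-byte "key" */
};

static int example_xor_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int i;
	int err;

	/* false: allow sleeping if the request flags permit it. */
	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		for (i = 0; i < walk.nbytes; i++)
			dst[i] = src[i] ^ ctx->pad;

		/* Everything in this chunk was consumed: 0 bytes remain. */
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
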