v3.15
/* XTS: as defined in IEEE1619/D16
 *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *	(sector sizes which are not a multiple of 16 bytes are,
 *	however currently unsupported)
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

struct priv {
	struct crypto_cipher *child;
	struct crypto_cipher *tweak;
};

static int setkey(struct crypto_tfm *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->tweak;
	u32 *flags = &parent->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even */
	if (keylen % 2) {
		/* tell the user why there was an error */
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key + keylen/2, keylen/2);
	if (err)
		return err;

	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	child = ctx->child;

	/* data cipher, uses Key1 i.e. the first half of *key */
	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen/2);
	if (err)
		return err;

	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	return 0;
}
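
/* Editor's note (not in the original source): for xts(aes) with a 64-byte
 * key, setkey() above hands bytes 0..31 (Key1) to ctx->child and bytes
 * 32..63 (Key2) to ctx->tweak. */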

struct sinfo {
	be128 *t;
	struct crypto_tfm *tfm;
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
};

static inline void xts_round(struct sinfo *s, void *dst, const void *src)
{
	be128_xor(dst, s->t, src);		/* PP <- T xor P */
	s->fn(s->tfm, dst, dst);		/* CC <- E(Key1,PP) */
	be128_xor(dst, dst, s->t);		/* C <- T xor CC */
}

static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*tw)(struct crypto_tfm *, u8 *, const u8 *),
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = XTS_BLOCK_SIZE;
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!w->nbytes)
		return err;

	s.t = (be128 *)w->iv;
	avail = w->nbytes;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);

	goto first;

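	/* Editor's note: block 0 jumps straight to 'first' since its tweak is
	 * E(Key2, IV), computed just above; each later block multiplies the
	 * previous tweak by x in GF(2^128) via gf128mul_x_ble(). */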
	for (;;) {
		do {
			gf128mul_x_ble(s.t, s.t);

first:
			xts_round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!w->nbytes)
			break;

		avail = w->nbytes;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
		     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
		     crypto_cipher_alg(ctx->child)->cia_decrypt);
}
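
/* Editor's note: decrypt() passes cia_encrypt for the tweak cipher. XTS
 * always derives T by encrypting the IV; only the data cipher changes
 * direction. */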

int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct xts_crypt_req *req)
{
	const unsigned int bsize = XTS_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				gf128mul_x_ble(&t_buf[i], t);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		*(be128 *)walk.iv = *t;

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);

static int init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct priv *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		crypto_free_cipher(cipher);
		return -EINVAL;
	}

	ctx->child = cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher)) {
		crypto_free_cipher(ctx->child);
		return PTR_ERR(cipher);
	}

	/* this check isn't really needed, leave it here just in case */
	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
		crypto_free_cipher(cipher);
		crypto_free_cipher(ctx->child);
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	ctx->tweak = cipher;

	return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct priv *ctx = crypto_tfm_ctx(tfm);
	crypto_free_cipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static struct crypto_instance *alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("xts", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;

	if (alg->cra_alignmask < 7)
		inst->alg.cra_alignmask = 7;
	else
		inst->alg.cra_alignmask = alg->cra_alignmask;

	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize =
		2 * alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize =
		2 * alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct priv);

	inst->alg.cra_init = init_tfm;
	inst->alg.cra_exit = exit_tfm;

	inst->alg.cra_blkcipher.setkey = setkey;
	inst->alg.cra_blkcipher.encrypt = encrypt;
	inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.alloc = alloc,
	.free = free,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
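
For orientation, a minimal sketch of driving this template through the blkcipher API of this era. This is an editor's illustration, not part of crypto/xts.c; the function name xts_demo, the all-zero key, IV, and buffer are placeholders.

/* Hypothetical usage sketch (editor's illustration, not part of xts.c). */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int xts_demo(void)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[64] = { 0 };	/* Key1 || Key2 for xts(aes); placeholder */
	u8 iv[16] = { 0 };	/* e.g. little-endian sector number */
	u8 *buf;		/* one sector, encrypted in place */
	int err;

	buf = kzalloc(512, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	tfm = crypto_alloc_blkcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		kfree(buf);
		return PTR_ERR(tfm);
	}

	err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	sg_init_one(&sg, buf, 512);
	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	desc.tfm = tfm;
	desc.flags = 0;
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 512);
out:
	crypto_free_blkcipher(tfm);
	kfree(buf);
	return err;
}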
v4.10.11
/* XTS: as defined in IEEE1619/D16
 *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *	(sector sizes which are not a multiple of 16 bytes are,
 *	however currently unsupported)
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define XTS_BUFFER_SIZE 128u

struct priv {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

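/* Editor's note: per-request state. buf/ext hold the tweak values for the
 * chunk in flight, src/dst walk the caller's scatterlists, left counts the
 * bytes still to process, and subreq drives the underlying ecb cipher. */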
struct rctx {
	be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];

	be128 t;

	be128 *ext;

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;

	struct skcipher_request subreq;
};

static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}

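/* Editor's note: post_crypt() finishes a chunk after the ecb pass, xoring
 * the saved tweaks back in (C = T xor CC) and re-pointing rctx->dst at the
 * unprocessed tail of the caller's scatterlist. */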
static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}

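/* Editor's note: pre_crypt() prepares a chunk, saving T for each block and
 * writing PP = T xor P into the destination while advancing T by x in
 * GF(2^128); the ecb subrequest then encrypts all the PP blocks at once. */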
static int pre_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	bool more;
	int err;

	subreq = &rctx->subreq;
	cryptlen = subreq->cryptlen;

	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, NULL);

	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			be128_xor(wdst++, &rctx->t, wsrc++);
			gf128mul_x_ble(&rctx->t, &rctx->t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}

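/* Editor's note: requests up to XTS_BUFFER_SIZE bytes use the inline
 * rctx->buf for tweaks; larger requests try to kmalloc up to a page and
 * fall back to the small buffer if the allocation fails. */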
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = XTS_BUFFER_SIZE;
	if (req->cryptlen > XTS_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	if (rctx->ext)
		kzfree(rctx->ext);
}

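/* Editor's note: do_encrypt() loops pre_crypt -> ecb encrypt -> post_crypt
 * one chunk at a time; if the child cipher goes asynchronous it returns
 * early and encrypt_done() resumes the loop from the completion callback. */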
static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);
	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);
	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}

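/* Editor's note: xts_crypt() below is the legacy blkcipher-based helper,
 * kept for callers that supply their own tweak/crypt callbacks and tweak
 * buffer through struct xts_crypt_req. */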
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct xts_crypt_req *req)
{
	const unsigned int bsize = XTS_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				gf128mul_x_ble(&t_buf[i], t);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		*(be128 *)walk.iv = *t;

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK |
				   CRYPTO_ALG_ASYNC);

	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_drop_spawn;

		if (ctx->name[len - 1] != ')')
			goto err_drop_spawn;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			/* was "return -ENAMETOOLONG", which leaked inst and
			 * the spawn; take the common error path instead */
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(&ctx->spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

580
581static struct crypto_template crypto_tmpl = {
582	.name = "xts",
583	.create = create,
 
584	.module = THIS_MODULE,
585};
586
587static int __init crypto_module_init(void)
588{
589	return crypto_register_template(&crypto_tmpl);
590}
591
592static void __exit crypto_module_exit(void)
593{
594	crypto_unregister_template(&crypto_tmpl);
595}
596
597module_init(crypto_module_init);
598module_exit(crypto_module_exit);
599
600MODULE_LICENSE("GPL");
601MODULE_DESCRIPTION("XTS block cipher mode");
602MODULE_ALIAS_CRYPTO("xts");
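
For orientation, a minimal sketch of exercising this template through the skcipher API of this era. This is an editor's illustration, not part of crypto/xts.c; the function name xts_demo and the all-zero key, IV, and data are placeholders, and CRYPTO_ALG_ASYNC is masked so the sketch can skip async completion handling.

/* Hypothetical usage sketch (editor's illustration, not part of xts.c). */
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int xts_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	u8 key[64] = { 0 };	/* Key1 || Key2 for xts(aes); placeholder */
	u8 iv[16] = { 0 };	/* e.g. little-endian sector number */
	u8 *buf;		/* one sector, encrypted in place */
	int err;

	buf = kzalloc(512, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Mask out async implementations so encrypt() completes inline. */
	tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		kfree(buf);
		return PTR_ERR(tfm);
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 512);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, 512, iv);
	err = crypto_skcipher_encrypt(req);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	kfree(buf);
	return err;
}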