crypto/xts.c
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* XTS: as defined in IEEE1619/D16
  3 *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
  4 *
  5 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  6 *
  7 * Based on ecb.c
  8 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  9 */
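For orientation (an editorial summary, not part of the file): XTS splits the supplied key in two, encrypts the IV with the second half (Key2) to obtain an initial tweak T_1, and then masks each 16-byte block with a per-block tweak:

    T_{j+1} = T_j ⊗ α                     (computed by gf128mul_x_ble() below)
    C_j     = E_{Key1}(P_j ⊕ T_j) ⊕ T_j

where ⊗ is multiplication in GF(2^128) and α is the primitive element x. The ciphertext-stealing path for messages ending in a partial block is summarized after cts_final() below.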
 10#include <crypto/internal/skcipher.h>
 11#include <crypto/scatterwalk.h>
 12#include <linux/err.h>
 13#include <linux/init.h>
 14#include <linux/kernel.h>
 15#include <linux/module.h>
 16#include <linux/scatterlist.h>
 17#include <linux/slab.h>
 18
 19#include <crypto/xts.h>
 20#include <crypto/b128ops.h>
 21#include <crypto/gf128mul.h>
 22
 23struct priv {
 24	struct crypto_skcipher *child;
 25	struct crypto_cipher *tweak;
 26};
 27
 28struct xts_instance_ctx {
 29	struct crypto_skcipher_spawn spawn;
 30	char name[CRYPTO_MAX_ALG_NAME];
 31};
 32
 33struct rctx {
 34	le128 t;
 35	struct scatterlist *tail;
 36	struct scatterlist sg[2];
 37	struct skcipher_request subreq;
 38};
 39
 40static int setkey(struct crypto_skcipher *parent, const u8 *key,
 41		  unsigned int keylen)
 42{
 43	struct priv *ctx = crypto_skcipher_ctx(parent);
 44	struct crypto_skcipher *child;
 45	struct crypto_cipher *tweak;
 46	int err;
 47
 48	err = xts_verify_key(parent, key, keylen);
 49	if (err)
 50		return err;
 51
 52	keylen /= 2;
 53
 54	/* we need two cipher instances: one to compute the initial 'tweak'
 55	 * by encrypting the IV (usually the 'plain' iv) and the other
 56	 * one to encrypt and decrypt the data */
 57
 58	/* tweak cipher, uses Key2 i.e. the second half of *key */
 59	tweak = ctx->tweak;
 60	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
 61	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
 62				       CRYPTO_TFM_REQ_MASK);
 63	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
 64	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
 65					  CRYPTO_TFM_RES_MASK);
 66	if (err)
 67		return err;
 68
 69	/* data cipher, uses Key1 i.e. the first half of *key */
 70	child = ctx->child;
 71	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 72	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
 73					 CRYPTO_TFM_REQ_MASK);
 74	err = crypto_skcipher_setkey(child, key, keylen);
 75	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
 76					  CRYPTO_TFM_RES_MASK);
 77
 78	return err;
 79}
 80
 81/*
 82 * We compute the tweak masks twice (both before and after the ECB encryption or
 83 * decryption) to avoid having to allocate a temporary buffer and/or make
 84 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 85 * just doing the gf128mul_x_ble() calls again.
 86 */
 87static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
 88{
 89	struct rctx *rctx = skcipher_request_ctx(req);
 90	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 91	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
 92	const int bs = XTS_BLOCK_SIZE;
 93	struct skcipher_walk w;
 94	le128 t = rctx->t;
 95	int err;
 96
 97	if (second_pass) {
 98		req = &rctx->subreq;
 99		/* set to our TFM to enforce correct alignment: */
100		skcipher_request_set_tfm(req, tfm);
101	}
102	err = skcipher_walk_virt(&w, req, false);
103
104	while (w.nbytes) {
105		unsigned int avail = w.nbytes;
106		le128 *wsrc;
107		le128 *wdst;
108
109		wsrc = w.src.virt.addr;
110		wdst = w.dst.virt.addr;
111
112		do {
113			if (unlikely(cts) &&
114			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
115				if (!enc) {
116					if (second_pass)
117						rctx->t = t;
118					gf128mul_x_ble(&t, &t);
119				}
120				le128_xor(wdst, &t, wsrc);
121				if (enc && second_pass)
122					gf128mul_x_ble(&rctx->t, &t);
123				skcipher_walk_done(&w, avail - bs);
124				return 0;
125			}
126
127			le128_xor(wdst++, &t, wsrc++);
128			gf128mul_x_ble(&t, &t);
129		} while ((avail -= bs) >= bs);
130
131		err = skcipher_walk_done(&w, avail);
132	}
133
134	return err;
135}
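The tweak is advanced one block at a time by gf128mul_x_ble() from <crypto/gf128mul.h>. A minimal standalone sketch of the same operation, assuming the usual XTS little-endian convention (illustrative only, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Multiply a 128-bit value by x in GF(2^128), little-endian convention:
 * shift left one bit and, if a bit fell off the top, reduce modulo
 * x^128 + x^7 + x^2 + x + 1 by XORing 0x87 into the low byte. */
static void mul_x_ble(uint64_t t[2])	/* t[0] = low 64 bits, t[1] = high */
{
	uint64_t carry = t[1] >> 63;

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
}

int main(void)
{
	uint64_t t[2] = { 1, 0 };	/* T = 1 */

	mul_x_ble(t);			/* T = x */
	printf("%016llx%016llx\n",
	       (unsigned long long)t[1], (unsigned long long)t[0]);
	return 0;
}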
136
137static int xor_tweak_pre(struct skcipher_request *req, bool enc)
138{
139	return xor_tweak(req, false, enc);
140}
141
142static int xor_tweak_post(struct skcipher_request *req, bool enc)
143{
144	return xor_tweak(req, true, enc);
145}
146
147static void cts_done(struct crypto_async_request *areq, int err)
148{
149	struct skcipher_request *req = areq->data;
150	le128 b;
151
152	if (!err) {
153		struct rctx *rctx = skcipher_request_ctx(req);
154
155		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
156		le128_xor(&b, &rctx->t, &b);
157		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
158	}
159
160	skcipher_request_complete(req, err);
161}
162
163static int cts_final(struct skcipher_request *req,
164		     int (*crypt)(struct skcipher_request *req))
165{
166	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
167	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
168	struct rctx *rctx = skcipher_request_ctx(req);
169	struct skcipher_request *subreq = &rctx->subreq;
170	int tail = req->cryptlen % XTS_BLOCK_SIZE;
171	le128 b[2];
172	int err;
173
174	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
175				      offset - XTS_BLOCK_SIZE);
176
177	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
178	memcpy(b + 1, b, tail);
179	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
180
181	le128_xor(b, &rctx->t, b);
182
183	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
184
185	skcipher_request_set_tfm(subreq, ctx->child);
186	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
187	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
188				   XTS_BLOCK_SIZE, NULL);
189
190	err = crypt(subreq);
191	if (err)
192		return err;
193
194	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
195	le128_xor(b, &rctx->t, b);
196	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
197
198	return 0;
199}
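For reference, an editorial summary of the ciphertext stealing defined by IEEE 1619, which cts_final() implements: with m-1 full blocks and a t-byte tail, the bulk pass has already produced the last full ciphertext block

    CC = E_{Key1}(P_{m-1} ⊕ T_{m-1}) ⊕ T_{m-1}

at the end of req->dst. cts_final() then steals its first t bytes for the tail and re-encrypts the padded final block with the next tweak:

    C_m     = CC[0 .. t-1]
    PP      = P_m ∥ CC[t .. 15]
    C_{m-1} = E_{Key1}(PP ⊕ T_m) ⊕ T_m

so the full-size block C_{m-1} lands where P_{m-1} was and the t-byte C_m lands where P_m was.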
200
201static void encrypt_done(struct crypto_async_request *areq, int err)
202{
203	struct skcipher_request *req = areq->data;
204
205	if (!err) {
206		struct rctx *rctx = skcipher_request_ctx(req);
207
208		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
209		err = xor_tweak_post(req, true);
210
211		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
212			err = cts_final(req, crypto_skcipher_encrypt);
213			if (err == -EINPROGRESS)
214				return;
215		}
216	}
217
218	skcipher_request_complete(req, err);
219}
220
221static void decrypt_done(struct crypto_async_request *areq, int err)
222{
223	struct skcipher_request *req = areq->data;
224
225	if (!err) {
226		struct rctx *rctx = skcipher_request_ctx(req);
227
228		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
229		err = xor_tweak_post(req, false);
230
231		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
232			err = cts_final(req, crypto_skcipher_decrypt);
233			if (err == -EINPROGRESS)
234				return;
235		}
236	}
237
238	skcipher_request_complete(req, err);
239}
240
241static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
242{
243	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
244	struct rctx *rctx = skcipher_request_ctx(req);
245	struct skcipher_request *subreq = &rctx->subreq;
246
247	if (req->cryptlen < XTS_BLOCK_SIZE)
248		return -EINVAL;
249
250	skcipher_request_set_tfm(subreq, ctx->child);
251	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
252	skcipher_request_set_crypt(subreq, req->dst, req->dst,
253				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
254
255	/* calculate first value of T */
256	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
257
258	return 0;
259}
260
261static int encrypt(struct skcipher_request *req)
262{
263	struct rctx *rctx = skcipher_request_ctx(req);
264	struct skcipher_request *subreq = &rctx->subreq;
265	int err;
266
267	err = init_crypt(req, encrypt_done) ?:
268	      xor_tweak_pre(req, true) ?:
269	      crypto_skcipher_encrypt(subreq) ?:
270	      xor_tweak_post(req, true);
271
272	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
273		return err;
274
275	return cts_final(req, crypto_skcipher_encrypt);
276}
277
278static int decrypt(struct skcipher_request *req)
279{
280	struct rctx *rctx = skcipher_request_ctx(req);
281	struct skcipher_request *subreq = &rctx->subreq;
282	int err;
283
284	err = init_crypt(req, decrypt_done) ?:
285	      xor_tweak_pre(req, false) ?:
286	      crypto_skcipher_decrypt(subreq) ?:
287	      xor_tweak_post(req, false);
288
289	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
290		return err;
291
292	return cts_final(req, crypto_skcipher_decrypt);
293}
294
295static int init_tfm(struct crypto_skcipher *tfm)
296{
297	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
298	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
299	struct priv *ctx = crypto_skcipher_ctx(tfm);
300	struct crypto_skcipher *child;
301	struct crypto_cipher *tweak;
302
303	child = crypto_spawn_skcipher(&ictx->spawn);
304	if (IS_ERR(child))
305		return PTR_ERR(child);
306
307	ctx->child = child;
308
309	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
310	if (IS_ERR(tweak)) {
311		crypto_free_skcipher(ctx->child);
312		return PTR_ERR(tweak);
313	}
314
315	ctx->tweak = tweak;
316
317	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
318					 sizeof(struct rctx));
319
320	return 0;
321}
322
323static void exit_tfm(struct crypto_skcipher *tfm)
324{
325	struct priv *ctx = crypto_skcipher_ctx(tfm);
326
327	crypto_free_skcipher(ctx->child);
328	crypto_free_cipher(ctx->tweak);
329}
330
331static void free(struct skcipher_instance *inst)
332{
333	crypto_drop_skcipher(skcipher_instance_ctx(inst));
334	kfree(inst);
335}
336
337static int create(struct crypto_template *tmpl, struct rtattr **tb)
338{
339	struct skcipher_instance *inst;
340	struct crypto_attr_type *algt;
341	struct xts_instance_ctx *ctx;
342	struct skcipher_alg *alg;
343	const char *cipher_name;
344	u32 mask;
345	int err;
346
347	algt = crypto_get_attr_type(tb);
348	if (IS_ERR(algt))
349		return PTR_ERR(algt);
350
351	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
352		return -EINVAL;
353
354	cipher_name = crypto_attr_alg_name(tb[1]);
355	if (IS_ERR(cipher_name))
356		return PTR_ERR(cipher_name);
357
358	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
359	if (!inst)
360		return -ENOMEM;
361
362	ctx = skcipher_instance_ctx(inst);
363
364	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
365
366	mask = crypto_requires_off(algt->type, algt->mask,
367				   CRYPTO_ALG_NEED_FALLBACK |
368				   CRYPTO_ALG_ASYNC);
369
370	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
371	if (err == -ENOENT) {
372		err = -ENAMETOOLONG;
373		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
374			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
375			goto err_free_inst;
376
377		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
378	}
379
380	if (err)
381		goto err_free_inst;
382
383	alg = crypto_skcipher_spawn_alg(&ctx->spawn);
384
385	err = -EINVAL;
386	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
387		goto err_drop_spawn;
388
389	if (crypto_skcipher_alg_ivsize(alg))
390		goto err_drop_spawn;
391
392	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
393				  &alg->base);
394	if (err)
395		goto err_drop_spawn;
396
397	err = -EINVAL;
398	cipher_name = alg->base.cra_name;
399
400	/* Alas we screwed up the naming so we have to mangle the
401	 * cipher name.
402	 */
403	if (!strncmp(cipher_name, "ecb(", 4)) {
404		unsigned len;
405
406		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
407		if (len < 2 || len >= sizeof(ctx->name))
408			goto err_drop_spawn;
409
410		if (ctx->name[len - 1] != ')')
411			goto err_drop_spawn;
412
413		ctx->name[len - 1] = 0;
414
415		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
416			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
417			err = -ENAMETOOLONG;
418			goto err_drop_spawn;
419		}
420	} else
421		goto err_drop_spawn;
422
423	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
424	inst->alg.base.cra_priority = alg->base.cra_priority;
425	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
426	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
427				       (__alignof__(u64) - 1);
428
429	inst->alg.ivsize = XTS_BLOCK_SIZE;
430	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
431	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
432
433	inst->alg.base.cra_ctxsize = sizeof(struct priv);
434
435	inst->alg.init = init_tfm;
436	inst->alg.exit = exit_tfm;
437
438	inst->alg.setkey = setkey;
439	inst->alg.encrypt = encrypt;
440	inst->alg.decrypt = decrypt;
441
442	inst->free = free;
443
444	err = skcipher_register_instance(tmpl, inst);
445	if (err)
446		goto err_drop_spawn;
447
448out:
449	return err;
450
451err_drop_spawn:
452	crypto_drop_skcipher(&ctx->spawn);
453err_free_inst:
454	kfree(inst);
455	goto out;
456}
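Once create() has instantiated the template, e.g. as xts(aes) on top of ecb(aes), the instance is visible in /proc/crypto. A typical entry looks roughly like this (illustrative values; fields vary by kernel version and driver):

name         : xts(aes)
driver       : xts(ecb(aes-generic))
module       : kernel
priority     : 100
refcnt       : 1
selftest     : passed
type         : skcipher
blocksize    : 16
min keysize  : 32
max keysize  : 64
ivsize       : 16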
457
458static struct crypto_template crypto_tmpl = {
459	.name = "xts",
460	.create = create,
461	.module = THIS_MODULE,
462};
463
464static int __init crypto_module_init(void)
465{
466	return crypto_register_template(&crypto_tmpl);
467}
468
469static void __exit crypto_module_exit(void)
470{
471	crypto_unregister_template(&crypto_tmpl);
472}
473
474subsys_initcall(crypto_module_init);
475module_exit(crypto_module_exit);
476
477MODULE_LICENSE("GPL");
478MODULE_DESCRIPTION("XTS block cipher mode");
479MODULE_ALIAS_CRYPTO("xts");
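A minimal userspace sketch of driving this algorithm through the AF_ALG socket interface (error handling omitted; the 64-byte key is Key1 || Key2 for xts(aes) with AES-256, and the 16-byte IV carries the tweak, conventionally the little-endian sector number):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "xts(aes)",
	};
	unsigned char key[64] = { 0 };		/* Key1 || Key2 (demo only) */
	unsigned char data[32] = "xts demo plaintext, two blocks.";
	char cbuf[CMSG_SPACE(sizeof(__u32)) + CMSG_SPACE(4 + 16)] = { 0 };
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *iv;
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, NULL);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(4 + 16);
	iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	iv->ivlen = 16;
	memset(iv->iv, 0, 16);			/* tweak for sector 0 */

	sendmsg(opfd, &msg, 0);
	read(opfd, data, sizeof(data));		/* ciphertext comes back */

	for (i = 0; i < (int)sizeof(data); i++)
		printf("%02x", data[i]);
	printf("\n");
	close(opfd);
	close(tfmfd);
	return 0;
}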
v4.17
 
  1/* XTS: as defined in IEEE1619/D16
  2 *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
  3 *	(sector sizes which are not a multiple of 16 bytes are,
  4 *	however, currently unsupported)
  5 *
  6 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  7 *
  8 * Based on ecb.c
  9 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 10 *
 11 * This program is free software; you can redistribute it and/or modify it
 12 * under the terms of the GNU General Public License as published by the Free
 13 * Software Foundation; either version 2 of the License, or (at your option)
 14 * any later version.
 15 */
 16#include <crypto/internal/skcipher.h>
 17#include <crypto/scatterwalk.h>
 18#include <linux/err.h>
 19#include <linux/init.h>
 20#include <linux/kernel.h>
 21#include <linux/module.h>
 22#include <linux/scatterlist.h>
 23#include <linux/slab.h>
 24
 25#include <crypto/xts.h>
 26#include <crypto/b128ops.h>
 27#include <crypto/gf128mul.h>
 28
 29#define XTS_BUFFER_SIZE 128u
 30
 31struct priv {
 32	struct crypto_skcipher *child;
 33	struct crypto_cipher *tweak;
 34};
 35
 36struct xts_instance_ctx {
 37	struct crypto_skcipher_spawn spawn;
 38	char name[CRYPTO_MAX_ALG_NAME];
 39};
 40
 41struct rctx {
 42	le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
 43
 44	le128 t;
 45
 46	le128 *ext;
 47
 48	struct scatterlist srcbuf[2];
 49	struct scatterlist dstbuf[2];
 50	struct scatterlist *src;
 51	struct scatterlist *dst;
 52
 53	unsigned int left;
 54
 55	struct skcipher_request subreq;
 56};
 57
 58static int setkey(struct crypto_skcipher *parent, const u8 *key,
 59		  unsigned int keylen)
 60{
 61	struct priv *ctx = crypto_skcipher_ctx(parent);
 62	struct crypto_skcipher *child;
 63	struct crypto_cipher *tweak;
 64	int err;
 65
 66	err = xts_verify_key(parent, key, keylen);
 67	if (err)
 68		return err;
 69
 70	keylen /= 2;
 71
 72	/* we need two cipher instances: one to compute the initial 'tweak'
 73	 * by encrypting the IV (usually the 'plain' iv) and the other
 74	 * one to encrypt and decrypt the data */
 75
 76	/* tweak cipher, uses Key2 i.e. the second half of *key */
 77	tweak = ctx->tweak;
 78	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
 79	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
 80				       CRYPTO_TFM_REQ_MASK);
 81	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
 82	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
 83					  CRYPTO_TFM_RES_MASK);
 84	if (err)
 85		return err;
 86
 87	/* data cipher, uses Key1 i.e. the first half of *key */
 88	child = ctx->child;
 89	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 90	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
 91					 CRYPTO_TFM_REQ_MASK);
 92	err = crypto_skcipher_setkey(child, key, keylen);
 93	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
 94					  CRYPTO_TFM_RES_MASK);
 95
 96	return err;
 97}
 98
 99static int post_crypt(struct skcipher_request *req)
100{
101	struct rctx *rctx = skcipher_request_ctx(req);
102	le128 *buf = rctx->ext ?: rctx->buf;
103	struct skcipher_request *subreq;
104	const int bs = XTS_BLOCK_SIZE;
105	struct skcipher_walk w;
106	struct scatterlist *sg;
107	unsigned offset;
108	int err;
109
110	subreq = &rctx->subreq;
111	err = skcipher_walk_virt(&w, subreq, false);
112
113	while (w.nbytes) {
114		unsigned int avail = w.nbytes;
115		le128 *wdst;
116
117		wdst = w.dst.virt.addr;
118
119		do {
120			le128_xor(wdst, buf++, wdst);
121			wdst++;
122		} while ((avail -= bs) >= bs);
123
124		err = skcipher_walk_done(&w, avail);
125	}
126
127	rctx->left -= subreq->cryptlen;
128
129	if (err || !rctx->left)
130		goto out;
131
132	rctx->dst = rctx->dstbuf;
133
134	scatterwalk_done(&w.out, 0, 1);
135	sg = w.out.sg;
136	offset = w.out.offset;
137
138	if (rctx->dst != sg) {
139		rctx->dst[0] = *sg;
140		sg_unmark_end(rctx->dst);
141		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
142	}
143	rctx->dst[0].length -= offset - sg->offset;
144	rctx->dst[0].offset = offset;
145
146out:
147	return err;
148}
149
150static int pre_crypt(struct skcipher_request *req)
151{
152	struct rctx *rctx = skcipher_request_ctx(req);
153	le128 *buf = rctx->ext ?: rctx->buf;
154	struct skcipher_request *subreq;
155	const int bs = XTS_BLOCK_SIZE;
156	struct skcipher_walk w;
157	struct scatterlist *sg;
158	unsigned cryptlen;
159	unsigned offset;
160	bool more;
161	int err;
162
163	subreq = &rctx->subreq;
164	cryptlen = subreq->cryptlen;
165
166	more = rctx->left > cryptlen;
167	if (!more)
168		cryptlen = rctx->left;
169
170	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
171				   cryptlen, NULL);
172
173	err = skcipher_walk_virt(&w, subreq, false);
174
175	while (w.nbytes) {
176		unsigned int avail = w.nbytes;
177		le128 *wsrc;
178		le128 *wdst;
179
180		wsrc = w.src.virt.addr;
181		wdst = w.dst.virt.addr;
182
183		do {
184			*buf++ = rctx->t;
185			le128_xor(wdst++, &rctx->t, wsrc++);
186			gf128mul_x_ble(&rctx->t, &rctx->t);
187		} while ((avail -= bs) >= bs);
188
189		err = skcipher_walk_done(&w, avail);
190	}
191
192	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
193				   cryptlen, NULL);
194
195	if (err || !more)
196		goto out;
197
198	rctx->src = rctx->srcbuf;
199
200	scatterwalk_done(&w.in, 0, 1);
201	sg = w.in.sg;
202	offset = w.in.offset;
203
204	if (rctx->src != sg) {
205		rctx->src[0] = *sg;
206		sg_unmark_end(rctx->src);
207		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
208	}
209	rctx->src[0].length -= offset - sg->offset;
210	rctx->src[0].offset = offset;
211
212out:
213	return err;
214}
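Unlike the v5.4 code above, which recomputes the tweak masks in a second xor_tweak() pass, this version saves every per-block tweak in rctx->buf (or the kmalloc'd rctx->ext) during pre_crypt(), runs the child ecb(cipher) over the chunk, and re-applies the saved masks in post_crypt(). A self-contained sketch of that shape, with a toy stand-in for the ECB pass (illustrative only, not kernel code):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BS 16

static void mul_x(uint64_t t[2])	/* advance tweak: multiply by x */
{
	uint64_t carry = t[1] >> 63;

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
}

static void toy_ecb(uint8_t *buf, size_t len)	/* placeholder, NOT a cipher */
{
	size_t i;

	for (i = 0; i < len; i++)
		buf[i] ^= 0xAA;
}

/* Mask each block with its tweak (pre_crypt), run one "ECB" pass over the
 * whole chunk, then re-apply the saved masks (post_crypt). Handles up to
 * 64 blocks per call for brevity. */
static void xts_like_chunk(uint8_t *data, size_t nblocks, uint64_t t[2])
{
	uint64_t saved[64][2];
	size_t i;
	int j;

	for (i = 0; i < nblocks; i++) {
		memcpy(saved[i], t, BS);	/* remember this block's mask */
		for (j = 0; j < BS; j++)
			data[i * BS + j] ^= ((uint8_t *)t)[j];
		mul_x(t);
	}
	toy_ecb(data, nblocks * BS);		/* child ecb(cipher) goes here */
	for (i = 0; i < nblocks; i++)		/* post_crypt: unmask */
		for (j = 0; j < BS; j++)
			data[i * BS + j] ^= ((uint8_t *)saved[i])[j];
}

int main(void)
{
	uint8_t buf[2 * BS] = "two blocks of demo payload....";
	uint64_t t[2] = { 1, 0 };		/* pretend E_Key2(IV) == 1 */
	size_t i;

	xts_like_chunk(buf, 2, t);
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}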
215
216static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
217{
218	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
219	struct rctx *rctx = skcipher_request_ctx(req);
220	struct skcipher_request *subreq;
221	gfp_t gfp;
222
223	subreq = &rctx->subreq;
224	skcipher_request_set_tfm(subreq, ctx->child);
225	skcipher_request_set_callback(subreq, req->base.flags, done, req);
226
227	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
228							   GFP_ATOMIC;
229	rctx->ext = NULL;
230
231	subreq->cryptlen = XTS_BUFFER_SIZE;
232	if (req->cryptlen > XTS_BUFFER_SIZE) {
233		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
234
235		rctx->ext = kmalloc(n, gfp);
236		if (rctx->ext)
237			subreq->cryptlen = n;
238	}
239
240	rctx->src = req->src;
241	rctx->dst = req->dst;
242	rctx->left = req->cryptlen;
243
244	/* calculate first value of T */
245	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
246
247	return 0;
248}
249
250static void exit_crypt(struct skcipher_request *req)
251{
252	struct rctx *rctx = skcipher_request_ctx(req);
253
254	rctx->left = 0;
255
256	if (rctx->ext)
257		kzfree(rctx->ext);
258}
259
260static int do_encrypt(struct skcipher_request *req, int err)
261{
262	struct rctx *rctx = skcipher_request_ctx(req);
263	struct skcipher_request *subreq;
264
265	subreq = &rctx->subreq;
266
267	while (!err && rctx->left) {
268		err = pre_crypt(req) ?:
269		      crypto_skcipher_encrypt(subreq) ?:
270		      post_crypt(req);
271
272		if (err == -EINPROGRESS || err == -EBUSY)
273			return err;
274	}
275
276	exit_crypt(req);
277	return err;
278}
279
280static void encrypt_done(struct crypto_async_request *areq, int err)
281{
282	struct skcipher_request *req = areq->data;
283	struct skcipher_request *subreq;
284	struct rctx *rctx;
285
286	rctx = skcipher_request_ctx(req);
287
288	if (err == -EINPROGRESS) {
289		if (rctx->left != req->cryptlen)
290			return;
291		goto out;
292	}
293
294	subreq = &rctx->subreq;
295	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
296
297	err = do_encrypt(req, err ?: post_crypt(req));
298	if (rctx->left)
299		return;
300
301out:
302	skcipher_request_complete(req, err);
303}
304
305static int encrypt(struct skcipher_request *req)
306{
307	return do_encrypt(req, init_crypt(req, encrypt_done));
308}
309
310static int do_decrypt(struct skcipher_request *req, int err)
311{
312	struct rctx *rctx = skcipher_request_ctx(req);
313	struct skcipher_request *subreq;
314
315	subreq = &rctx->subreq;
316
317	while (!err && rctx->left) {
318		err = pre_crypt(req) ?:
319		      crypto_skcipher_decrypt(subreq) ?:
320		      post_crypt(req);
321
322		if (err == -EINPROGRESS || err == -EBUSY)
323			return err;
324	}
325
326	exit_crypt(req);
327	return err;
328}
329
330static void decrypt_done(struct crypto_async_request *areq, int err)
331{
332	struct skcipher_request *req = areq->data;
333	struct skcipher_request *subreq;
334	struct rctx *rctx;
335
336	rctx = skcipher_request_ctx(req);
337
338	if (err == -EINPROGRESS) {
339		if (rctx->left != req->cryptlen)
340			return;
341		goto out;
342	}
343
344	subreq = &rctx->subreq;
345	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
346
347	err = do_decrypt(req, err ?: post_crypt(req));
348	if (rctx->left)
349		return;
350
351out:
352	skcipher_request_complete(req, err);
353}
354
355static int decrypt(struct skcipher_request *req)
356{
357	return do_decrypt(req, init_crypt(req, decrypt_done));
358}
359
360static int init_tfm(struct crypto_skcipher *tfm)
361{
362	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
363	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
364	struct priv *ctx = crypto_skcipher_ctx(tfm);
365	struct crypto_skcipher *child;
366	struct crypto_cipher *tweak;
367
368	child = crypto_spawn_skcipher(&ictx->spawn);
369	if (IS_ERR(child))
370		return PTR_ERR(child);
371
372	ctx->child = child;
373
374	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
375	if (IS_ERR(tweak)) {
376		crypto_free_skcipher(ctx->child);
377		return PTR_ERR(tweak);
378	}
379
380	ctx->tweak = tweak;
381
382	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
383					 sizeof(struct rctx));
384
385	return 0;
386}
387
388static void exit_tfm(struct crypto_skcipher *tfm)
389{
390	struct priv *ctx = crypto_skcipher_ctx(tfm);
391
392	crypto_free_skcipher(ctx->child);
393	crypto_free_cipher(ctx->tweak);
394}
395
396static void free(struct skcipher_instance *inst)
397{
398	crypto_drop_skcipher(skcipher_instance_ctx(inst));
399	kfree(inst);
400}
401
402static int create(struct crypto_template *tmpl, struct rtattr **tb)
403{
404	struct skcipher_instance *inst;
405	struct crypto_attr_type *algt;
406	struct xts_instance_ctx *ctx;
407	struct skcipher_alg *alg;
408	const char *cipher_name;
409	u32 mask;
410	int err;
411
412	algt = crypto_get_attr_type(tb);
413	if (IS_ERR(algt))
414		return PTR_ERR(algt);
415
416	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
417		return -EINVAL;
418
419	cipher_name = crypto_attr_alg_name(tb[1]);
420	if (IS_ERR(cipher_name))
421		return PTR_ERR(cipher_name);
422
423	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
424	if (!inst)
425		return -ENOMEM;
426
427	ctx = skcipher_instance_ctx(inst);
428
429	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
430
431	mask = crypto_requires_off(algt->type, algt->mask,
432				   CRYPTO_ALG_NEED_FALLBACK |
433				   CRYPTO_ALG_ASYNC);
434
435	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
436	if (err == -ENOENT) {
437		err = -ENAMETOOLONG;
438		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
439			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
440			goto err_free_inst;
441
442		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
443	}
444
445	if (err)
446		goto err_free_inst;
447
448	alg = crypto_skcipher_spawn_alg(&ctx->spawn);
449
450	err = -EINVAL;
451	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
452		goto err_drop_spawn;
453
454	if (crypto_skcipher_alg_ivsize(alg))
455		goto err_drop_spawn;
456
457	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
458				  &alg->base);
459	if (err)
460		goto err_drop_spawn;
461
462	err = -EINVAL;
463	cipher_name = alg->base.cra_name;
464
465	/* Alas we screwed up the naming so we have to mangle the
466	 * cipher name.
467	 */
468	if (!strncmp(cipher_name, "ecb(", 4)) {
469		unsigned len;
470
471		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
472		if (len < 2 || len >= sizeof(ctx->name))
473			goto err_drop_spawn;
474
475		if (ctx->name[len - 1] != ')')
476			goto err_drop_spawn;
477
478		ctx->name[len - 1] = 0;
479
480		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
481			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
482			err = -ENAMETOOLONG;
483			goto err_drop_spawn;
484		}
485	} else
486		goto err_drop_spawn;
487
488	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
489	inst->alg.base.cra_priority = alg->base.cra_priority;
490	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
491	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
492				       (__alignof__(u64) - 1);
493
494	inst->alg.ivsize = XTS_BLOCK_SIZE;
495	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
496	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
497
498	inst->alg.base.cra_ctxsize = sizeof(struct priv);
499
500	inst->alg.init = init_tfm;
501	inst->alg.exit = exit_tfm;
502
503	inst->alg.setkey = setkey;
504	inst->alg.encrypt = encrypt;
505	inst->alg.decrypt = decrypt;
506
507	inst->free = free;
508
509	err = skcipher_register_instance(tmpl, inst);
510	if (err)
511		goto err_drop_spawn;
512
513out:
514	return err;
515
516err_drop_spawn:
517	crypto_drop_skcipher(&ctx->spawn);
518err_free_inst:
519	kfree(inst);
520	goto out;
521}
522
523static struct crypto_template crypto_tmpl = {
524	.name = "xts",
525	.create = create,
526	.module = THIS_MODULE,
527};
528
529static int __init crypto_module_init(void)
530{
531	return crypto_register_template(&crypto_tmpl);
532}
533
534static void __exit crypto_module_exit(void)
535{
536	crypto_unregister_template(&crypto_tmpl);
537}
538
539module_init(crypto_module_init);
540module_exit(crypto_module_exit);
541
542MODULE_LICENSE("GPL");
543MODULE_DESCRIPTION("XTS block cipher mode");
544MODULE_ALIAS_CRYPTO("xts");