v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

struct priv {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

struct rctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}

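/*
 * Illustrative example, assuming the common xts(aes) instantiation: the
 * supplied key is the concatenation Key1 || Key2, so a 64-byte key passes
 * xts_verify_key(), keylen /= 2 yields 32, and bytes 0..31 key the
 * ecb(aes) data cipher while bytes 32..63 key the aes tweak cipher.
 */
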
/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

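/*
 * Net effect of the two xor_tweak() passes around the ecb(..) subrequest
 * for a block-aligned request (the CTS branch above handles misaligned
 * tails): pass one computes dst = src ^ T_i, the ecb(..) instance then
 * encrypts (or decrypts) dst in place, and pass two computes
 * dst = dst ^ T_i again, i.e. the per-block XTS formula
 * C_i = E_K1(P_i ^ T_i) ^ T_i, with T_i regenerated by repeated
 * gf128mul_x_ble() calls instead of being stored per block.
 */
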
static int xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xor_tweak(req, false, enc);
}

static int xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xor_tweak(req, true, enc);
}

static void cts_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	le128 b;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

static int cts_final(struct skcipher_request *req,
		     int (*crypt)(struct skcipher_request *req))
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	memcpy(b + 1, b, tail);
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

static int encrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = init_crypt(req, encrypt_done) ?:
	      xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return cts_final(req, crypto_skcipher_encrypt);
}

static int decrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = init_crypt(req, decrypt_done) ?:
	      xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return cts_final(req, crypto_skcipher_decrypt);
}

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK |
				   CRYPTO_ALG_ASYNC);

	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_drop_spawn;

		if (ctx->name[len - 1] != ')')
			goto err_drop_spawn;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(&ctx->spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
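
A minimal usage sketch (illustrative, not part of xts.c; the function name, buffer size, and placeholder key/IV are assumptions) of how kernel code might drive the "xts(aes)" instance this template provides, using the synchronous crypto_wait_req() helper pattern:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int xts_demo(void)
{
	u8 key[64] = {};	/* Key1 || Key2; placeholders only (FIPS
				 * mode rejects identical halves) */
	u8 iv[16] = {};		/* e.g. the sector number, little endian */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	buf = kzalloc(4096, GFP_KERNEL);	/* one "sector" of data */
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 4096);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 4096, iv);

	/* encrypt in place; decryption swaps in crypto_skcipher_decrypt() */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}
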
v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

struct xts_tfm_ctx {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

struct xts_request_ctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
			 bool enc)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, false, enc);
}

static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, true, enc);
}

static void xts_cts_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	le128 b;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

static int xts_cts_final(struct skcipher_request *req,
			 int (*crypt)(struct skcipher_request *req))
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	b[1] = b[0];
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
				      req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}

static void xts_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void xts_decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static int xts_init_crypt(struct skcipher_request *req,
			  crypto_completion_t compl)
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

static int xts_encrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_encrypt_done) ?:
	      xts_xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xts_xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_decrypt_done) ?:
	      xts_xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xts_xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_decrypt);
}

static int xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct xts_request_ctx));

	return 0;
}

static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void xts_free_instance(struct skcipher_instance *inst)
{
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->spawn);
	kfree(inst);
}

static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_free_inst;

		if (ctx->name[len - 1] != ')')
			goto err_free_inst;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);

	inst->alg.init = xts_init_tfm;
	inst->alg.exit = xts_exit_tfm;

	inst->alg.setkey = xts_setkey;
	inst->alg.encrypt = xts_encrypt;
	inst->alg.decrypt = xts_decrypt;

	inst->free = xts_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		xts_free_instance(inst);
	}
	return err;
}

static struct crypto_template xts_tmpl = {
	.name = "xts",
	.create = xts_create,
	.module = THIS_MODULE,
};

static int __init xts_module_init(void)
{
	return crypto_register_template(&xts_tmpl);
}

static void __exit xts_module_exit(void)
{
	crypto_unregister_template(&xts_tmpl);
}

subsys_initcall(xts_module_init);
module_exit(xts_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
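
Both versions regenerate the per-block tweak with gf128mul_x_ble(), i.e. multiplication by x in GF(2^128) under the little-endian block convention of IEEE 1619, so the tweak for block i is T_i = E_Key2(IV) * x^i. A hedged, standalone restatement of that step in plain C (uint64_t halves instead of the kernel's le128 type; the struct and function names are illustrative):

#include <stdint.h>

/* 128-bit tweak as two little-endian 64-bit words */
struct tweak128 {
	uint64_t lo;	/* bits  0..63  */
	uint64_t hi;	/* bits 64..127 */
};

/* Multiply the tweak by x modulo x^128 + x^7 + x^2 + x + 1,
 * mirroring what gf128mul_x_ble() does between blocks. */
static void tweak_mul_x(struct tweak128 *t)
{
	uint64_t carry = t->hi >> 63;		/* bit 127 shifted out */

	t->hi = (t->hi << 1) | (t->lo >> 63);	/* 128-bit left shift */
	t->lo = (t->lo << 1) ^ (carry * 0x87);	/* fold back via 0x87 */
}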