/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 * (sector sizes which are not a multiple of 16 bytes are,
 * however, currently unsupported)
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

struct priv {
	struct crypto_cipher *child;
	struct crypto_cipher *tweak;
};

static int setkey(struct crypto_tfm *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->tweak;
	int err;

	err = xts_check_key(parent, key, keylen);
	if (err)
		return err;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key + keylen/2, keylen/2);
	if (err)
		return err;

	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	child = ctx->child;

	/* data cipher, uses Key1 i.e. the first half of *key */
	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen/2);
	if (err)
		return err;

	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	return 0;
}
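
/*
 * Example (a hedged sketch, not part of the original file): with
 * "xts(aes)" and a 64-byte key, setkey() above hands bytes 0..31 (Key1)
 * to the data cipher and bytes 32..63 (Key2) to the tweak cipher:
 *
 *	struct crypto_blkcipher *tfm;
 *	u8 key[64];				// Key1 || Key2
 *	int err;
 *
 *	tfm = crypto_alloc_blkcipher("xts(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
 */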

struct sinfo {
	be128 *t;
	struct crypto_tfm *tfm;
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
};

static inline void xts_round(struct sinfo *s, void *dst, const void *src)
{
	be128_xor(dst, s->t, src);		/* PP <- T xor P */
	s->fn(s->tfm, dst, dst);		/* CC <- E(Key1,PP) */
	be128_xor(dst, dst, s->t);		/* C <- T xor CC */
}
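
/*
 * Per-block XTS computation (a summary of IEEE 1619 in this file's own
 * notation): for block number j of a sector whose IV is i,
 *
 *	T_0 = E(Key2, i)			(computed once in crypt())
 *	T_j = T_{j-1} * x			(gf128mul_x_ble())
 *	C_j = T_j xor E(Key1, P_j xor T_j)	(xts_round())
 */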

static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*tw)(struct crypto_tfm *, u8 *, const u8 *),
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = XTS_BLOCK_SIZE;
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!w->nbytes)
		return err;

	s.t = (be128 *)w->iv;
	avail = w->nbytes;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);

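	/* enter the loop at 'first:' so the first block is processed with T
	 * exactly as produced by the tweak cipher, skipping the multiply */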
	goto first;

	for (;;) {
		do {
			gf128mul_x_ble(s.t, s.t);

first:
			xts_round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!w->nbytes)
			break;

		avail = w->nbytes;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
		     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
		     crypto_cipher_alg(ctx->child)->cia_decrypt);
}

int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct xts_crypt_req *req)
{
	const unsigned int bsize = XTS_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

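	/* as in crypt() above: enter the loop at 'first:' so that block 0
	 * uses T_0 (the encrypted IV) without an extra tweak multiply */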
	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				gf128mul_x_ble(&t_buf[i], t);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		*(be128 *)walk.iv = *t;

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);
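
/*
 * Example caller (a hedged sketch modeled on arch-specific glue code;
 * callback and context names here are illustrative, not real symbols):
 *
 *	be128 buf[8];
 *	struct xts_crypt_req req = {
 *		.tbuf = buf,
 *		.tbuflen = sizeof(buf),
 *		.tweak_ctx = tweak_ctx,
 *		.tweak_fn = my_tweak_encrypt,	// one block, Key2
 *		.crypt_ctx = crypt_ctx,
 *		.crypt_fn = my_ecb_crypt,	// nblocks * 16 bytes, Key1
 *	};
 *
 *	return xts_crypt(desc, dst, src, nbytes, &req);
 */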

static int init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct priv *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		crypto_free_cipher(cipher);
		return -EINVAL;
	}

	ctx->child = cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher)) {
		crypto_free_cipher(ctx->child);
		return PTR_ERR(cipher);
	}

	/* this check isn't really needed, leave it here just in case */
	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
		crypto_free_cipher(cipher);
		crypto_free_cipher(ctx->child);
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	ctx->tweak = cipher;

	return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct priv *ctx = crypto_tfm_ctx(tfm);
	crypto_free_cipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static struct crypto_instance *alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("xts", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;

	if (alg->cra_alignmask < 7)
		inst->alg.cra_alignmask = 7;
	else
		inst->alg.cra_alignmask = alg->cra_alignmask;

	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize =
		2 * alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize =
		2 * alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct priv);

	inst->alg.cra_init = init_tfm;
	inst->alg.cra_exit = exit_tfm;

	inst->alg.cra_blkcipher.setkey = setkey;
	inst->alg.cra_blkcipher.encrypt = encrypt;
	inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.alloc = alloc,
	.free = free,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");

// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

struct xts_tfm_ctx {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

struct xts_request_ctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}
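
/*
 * Example (a hedged sketch, not part of the original file): a 64-byte
 * key for "xts(aes)" selects AES-256; xts_setkey() hands key[0..31]
 * (Key1) to the ecb(aes) child and key[32..63] (Key2) to the tweak
 * cipher:
 *
 *	struct crypto_skcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, 64);
 */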

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
			 bool enc)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}
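
/*
 * A minimal sketch of what gf128mul_x_ble() computes (the real helper
 * lives in <crypto/gf128mul.h>): the 128-bit tweak, stored as two
 * little-endian 64-bit words (->b holds bytes 0..7, ->a bytes 8..15),
 * is multiplied by x in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1:
 *
 *	u64 lo = le64_to_cpu(t->b);
 *	u64 hi = le64_to_cpu(t->a);
 *	u64 carry = hi >> 63;			// bit 127
 *
 *	t->a = cpu_to_le64((hi << 1) | (lo >> 63));
 *	t->b = cpu_to_le64((lo << 1) ^ (carry ? 0x87 : 0));
 */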

static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, false, enc);
}

static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, true, enc);
}

static void xts_cts_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	le128 b;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

static int xts_cts_final(struct skcipher_request *req,
			 int (*crypt)(struct skcipher_request *req))
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	b[1] = b[0];
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
				      req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}
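
/*
 * Ciphertext stealing, sketched for cryptlen == 20 (one full block plus
 * a 4-byte tail; a hedged summary of xts_cts_final() above): the last
 * full output block C_{n-1} is read back, its first 4 bytes become the
 * short final ciphertext block C_n, and the 4 tail plaintext bytes,
 * padded with the remaining 12 bytes of C_{n-1}, are encrypted once
 * more under the tweak saved in rctx->t to form the new C_{n-1}.
 */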

static void xts_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void xts_decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static int xts_init_crypt(struct skcipher_request *req,
			  crypto_completion_t compl)
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

static int xts_encrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_encrypt_done) ?:
	      xts_xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xts_xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_decrypt_done) ?:
	      xts_xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xts_xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_decrypt);
}
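
/*
 * Example use (a hedged sketch; synchronous call, error handling
 * trimmed): encrypting one 512-byte sector whose number is encoded
 * little-endian in the IV:
 *
 *	struct skcipher_request *req;
 *	u8 iv[XTS_BLOCK_SIZE] = { 0 };		// sector 0
 *
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, sg_src, sg_dst, 512, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 */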

static int xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct xts_request_ctx));

	return 0;
}

static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void xts_free_instance(struct skcipher_instance *inst)
{
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->spawn);
	kfree(inst);
}

static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the cipher
	 * name: the child is "ecb(<cipher>)", but the tweak cipher and
	 * the instance name need the bare "<cipher>" again.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_free_inst;

		if (ctx->name[len - 1] != ')')
			goto err_free_inst;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);

	inst->alg.init = xts_init_tfm;
	inst->alg.exit = xts_exit_tfm;

	inst->alg.setkey = xts_setkey;
	inst->alg.encrypt = xts_encrypt;
	inst->alg.decrypt = xts_decrypt;

	inst->free = xts_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		xts_free_instance(inst);
	}
	return err;
}
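
/*
 * Instantiation sketch (hedged): a request for "xts(aes)" reaches
 * xts_create() with cipher_name == "aes"; grabbing a skcipher named
 * "aes" normally fails with -ENOENT, so the template grabs "ecb(aes)"
 * as the bulk child instead, then strips the "ecb(...)" wrapper back
 * off to allocate the bare "aes" tweak cipher in xts_init_tfm() and to
 * register the instance as "xts(aes)".
 */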

static struct crypto_template xts_tmpl = {
	.name = "xts",
	.create = xts_create,
	.module = THIS_MODULE,
};

static int __init xts_module_init(void)
{
	return crypto_register_template(&xts_tmpl);
}

static void __exit xts_module_exit(void)
{
	crypto_unregister_template(&xts_tmpl);
}

subsys_initcall(xts_module_init);
module_exit(xts_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");