crypto/lrw.c (v5.9)
// SPDX-License-Identifier: GPL-2.0-or-later
/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BLOCK_SIZE 16

struct lrw_tfm_ctx {
	struct crypto_skcipher *child;

	/*
	 * Optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value with key2. We could also have
	 * used 4k optimization tables, or no optimization at all; in
	 * the latter case we would have to store key2 here.
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};

struct lrw_request_ctx {
	be128 t;
	struct skcipher_request subreq;
};

static inline void lrw_setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}
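
/*
 * The XOR in lrw_setbit128_bbe() converts a bit index of the be128 value,
 * viewed as a 128-bit big-endian integer (index 0 being its least
 * significant bit, i.e. the LSB of the final byte), into the native bit
 * numbering that __set_bit() expects, on both little- and big-endian
 * machines. Setting bits 0, 1, 2, ... into the same buffer therefore
 * yields the { ...,0,0,1 }, { ...,0,1,1 }, { ...,1,1,1 } progression
 * that lrw_setkey() below feeds into mulinc[].
 */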

static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		lrw_setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

/*
 * Returns the number of trailing '1' bits in the words of the counter, which
 * is represented by 4 32-bit words, arranged from least to most significant.
 * At the same time, increments the counter by one.
 *
 * For example:
 *
 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
 * int i = lrw_next_index(counter);
 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
 */
static int lrw_next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/*
	 * If we get here, the counter was all ones and has just wrapped
	 * around to all zeros. This means we must return index 127, i.e.
	 * the one corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}
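
/*
 * Why the return value of lrw_next_index() picks the right mulinc[] entry:
 * incrementing I clears a run of trailing one bits and sets the next zero
 * bit, so I ^ (I + 1) is a value whose low (k + 1) bits are all set, where
 * k is the returned index. Since multiplication in GF(2^128) distributes
 * over XOR, key2*(I + 1) = key2*I ^ key2*(I ^ (I + 1)) = t ^ mulinc[k],
 * which is the single be128_xor() per block done in lrw_xor_tweak() below.
 */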

/*
 * We compute the tweak masks twice (both before and after the ECB encryption
 * or decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower
 * than just doing the lrw_next_index() calls again.
 */
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t,
				  &ctx->mulinc[lrw_next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

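/*
 * On the final step of the second pass, the incremented counter is written
 * back into the IV words, so when the request completes the caller's IV
 * holds the block index immediately following the processed data, ready
 * for a chained request.
 */
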
static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, false);
}

static int lrw_xor_tweak_post(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, true);
}

static void lrw_crypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct lrw_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = lrw_xor_tweak_post(req);
	}

	skcipher_request_complete(req, err);
}

static void lrw_init_crypt(struct skcipher_request *req)
{
	const struct lrw_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
				      req);
	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen, req->iv);

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);
}

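/*
 * The ECB subrequest runs with req->dst as both source and destination:
 * lrw_xor_tweak_pre() has already written the tweaked input into dst, the
 * child 'ecb(..)' transform then encrypts (or decrypts) dst in place, and
 * lrw_xor_tweak_post() applies the same tweak stream once more to produce
 * the final result.
 */
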
static int lrw_encrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_encrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_decrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct lrw_request_ctx));

	return 0;
}

static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void lrw_free_instance(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn,
					   skcipher_crypto_instance(inst),
					   ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(be128) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);

	inst->alg.init = lrw_init_tfm;
	inst->alg.exit = lrw_exit_tfm;

	inst->alg.setkey = lrw_setkey;
	inst->alg.encrypt = lrw_encrypt;
	inst->alg.decrypt = lrw_decrypt;

	inst->free = lrw_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		lrw_free_instance(inst);
	}
	return err;
}
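
/*
 * Note the unwind style in lrw_create(): the err_free_inst label lives
 * inside the if (err) block, so the error gotos above and a failed
 * skcipher_register_instance() both funnel through the same
 * lrw_free_instance() call before err is returned.
 */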

static struct crypto_template lrw_tmpl = {
	.name = "lrw",
	.create = lrw_create,
	.module = THIS_MODULE,
};

static int __init lrw_module_init(void)
{
	return crypto_register_template(&lrw_tmpl);
}

static void __exit lrw_module_exit(void)
{
	crypto_unregister_template(&lrw_tmpl);
}

subsys_initcall(lrw_module_init);
module_exit(lrw_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
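
For orientation, a minimal sketch (not part of lrw.c) of how a kernel
caller might instantiate the template above and encrypt one block through
the skcipher API. The 48-byte key layout (an AES-256 key followed by the
16-byte tweak key) and the zeroed IV are illustrative assumptions; per
lrw_setkey() above, the last LRW_BLOCK_SIZE bytes of the key become key2,
and the IV carries the big-endian 128-bit block index I.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int lrw_usage_sketch(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[48] = { };	/* key1 (AES-256) || key2 (tweak key) */
	u8 iv[16] = { };	/* big-endian 128-bit block index I */
	u8 *buf;
	int err;

	/* resolved by lrw_create() above, wrapping ecb(aes) */
	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* scatterlist memory must not live on the stack */
	buf = kzalloc(16, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	sg_init_one(&sg, buf, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv);

	/* handles a possibly asynchronous implementation */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
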
crypto/lrw.c (v5.4)
// SPDX-License-Identifier: GPL-2.0-or-later
/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BLOCK_SIZE 16

struct priv {
	struct crypto_skcipher *child;

	/*
	 * Optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value with key2. We could also have
	 * used 4k optimization tables, or no optimization at all; in
	 * the latter case we would have to store key2 here.
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};

struct rctx {
	be128 t;
	struct skcipher_request subreq;
};

static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

/*
 * Returns the number of trailing '1' bits in the words of the counter, which
 * is represented by 4 32-bit words, arranged from least to most significant.
 * At the same time, increments the counter by one.
 *
 * For example:
 *
 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
 * int i = next_index(counter);
 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
 */
static int next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/*
	 * If we get here, the counter was all ones and has just wrapped
	 * around to all zeros. This means we must return index 127, i.e.
	 * the one corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption
 * or decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower
 * than just doing the next_index() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int xor_tweak_pre(struct skcipher_request *req)
{
	return xor_tweak(req, false);
}

static int xor_tweak_post(struct skcipher_request *req)
{
	return xor_tweak(req, true);
}

static void crypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xor_tweak_post(req);
	}

	skcipher_request_complete(req, err);
}

static void init_crypt(struct skcipher_request *req)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen, req->iv);

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);
}

static int encrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	init_crypt(req);
	return xor_tweak_pre(req) ?:
		crypto_skcipher_encrypt(subreq) ?:
		xor_tweak_post(req);
}

static int decrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	init_crypt(req);
	return xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
		xor_tweak_post(req);
}

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;

		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(be128) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");