// SPDX-License-Identifier: GPL-2.0-or-later
/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */
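/*
 * Mode overview: for the plaintext block P with 128-bit index I,
 *
 *	T = K2 * I		(multiplication in GF(2^128))
 *	C = E(K1, P xor T) xor T
 *
 * Consecutive blocks differ only in an incrementing I, which is what
 * the mulinc[] optimization table below exploits.
 */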

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BLOCK_SIZE 16
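
/*
 * LRW is only defined for ciphers with a 128-bit block, since the tweak
 * arithmetic lives in GF(2^128); lrw_create() below rejects anything else.
 */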

struct lrw_tfm_ctx {
	struct crypto_skcipher *child;

	/*
	 * The 64k table optimizes multiplying an arbitrary (i.e.
	 * non-incrementing, as at the start of a new sector) value by
	 * key2. We could also have used 4k optimization tables or no
	 * optimization at all; in the latter case we would have to
	 * store key2 here.
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 },
	 * key2*{ 0,0,...1,1,1,1,1 }, etc.
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};
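
/*
 * Note: a gf128mul "64k" table costs roughly 64 KiB of memory per keyed
 * tfm (16 lookup tables of 256 be128 entries), traded for one
 * table-driven multiplication per sector instead of a bitwise one.
 */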

struct lrw_request_ctx {
	be128 t;
	struct skcipher_request subreq;
};

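/*
 * Set bit number 'bit' of a 128-bit value laid out in gf128mul's bbe
 * (big-endian) convention, i.e. bit 0 is the least significant bit of
 * the last byte. The XOR folds the index into __set_bit()'s native
 * numbering: on little-endian it mirrors the byte index (0x80 - 8 =
 * 120 = 15 * 8), on big-endian it mirrors the long index instead.
 */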
static inline void lrw_setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

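/*
 * The supplied key is the concatenation of the cipher key K1 and the
 * 16-byte tweak key K2; K2 occupies the last LRW_BLOCK_SIZE bytes.
 */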
static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		lrw_setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

/*
 * Returns the number of trailing '1' bits in the words of the counter, which is
 * represented by 4 32-bit words, arranged from least to most significant.
 * At the same time, increments the counter by one.
 *
 * For example:
 *
 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
 * int i = lrw_next_index(counter);
 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
 */
static int lrw_next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/*
	 * If we get here, then res == 128 and we are incrementing the counter
	 * from all ones to all zeros. This means we must return index 127, i.e.
	 * the one corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the lrw_next_index() calls again.
 */
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

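	/*
	 * The IV holds the block index I as a 128-bit big-endian value;
	 * unpack it into host-order words, least significant word first,
	 * so lrw_next_index() can scan and increment it cheaply.
	 */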
	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t,
				  &ctx->mulinc[lrw_next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, false);
}

static int lrw_xor_tweak_post(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, true);
}

static void lrw_crypt_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err) {
		struct lrw_request_ctx *rctx = skcipher_request_ctx(req);

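		/*
		 * This completion may run in atomic (e.g. softirq)
		 * context, so the second tweak pass below must not sleep.
		 */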
		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = lrw_xor_tweak_post(req);
	}

	skcipher_request_complete(req, err);
}

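/*
 * Prepare the ECB subrequest: the pre-pass already writes P xor T into
 * req->dst, so the child operates on req->dst in place, and the
 * post-pass xors T back in, again in place.
 */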
static void lrw_init_crypt(struct skcipher_request *req)
{
	const struct lrw_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
				      req);
	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen, req->iv);

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);
}

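/*
 * Each operation is three sub-steps chained with the GCC ?: extension,
 * which returns the first nonzero error code: xor-tweak the plaintext
 * into dst, run the child ecb(...) over dst in place, xor-tweak again.
 */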
static int lrw_encrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_encrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_decrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

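/*
 * Per-request memory is the child's request size plus our
 * lrw_request_ctx, so the subrequest can live inside the same
 * allocation as the outer request context.
 */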
static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct lrw_request_ctx));

	return 0;
}

static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void lrw_free_instance(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

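/*
 * Template constructor. Users typically instantiate this as "lrw(aes)";
 * the template then grabs "ecb(aes)" internally, so the combined key is
 * the AES key followed by the 16-byte tweak key. A minimal usage sketch
 * (illustrative only, error handling elided):
 *
 *	struct crypto_skcipher *tfm =
 *		crypto_alloc_skcipher("lrw(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 32 + 16); // AES-256 key + K2
 */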
static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_alg_common *alg;
	struct skcipher_instance *inst;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn,
					   skcipher_crypto_instance(inst),
					   ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg_common(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_free_inst;

	if (alg->ivsize)
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		int len;

		len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2)
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(be128) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE;
	inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);

	inst->alg.init = lrw_init_tfm;
	inst->alg.exit = lrw_exit_tfm;

	inst->alg.setkey = lrw_setkey;
	inst->alg.encrypt = lrw_encrypt;
	inst->alg.decrypt = lrw_decrypt;

	inst->free = lrw_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		lrw_free_instance(inst);
	}
	return err;
}

static struct crypto_template lrw_tmpl = {
	.name = "lrw",
	.create = lrw_create,
	.module = THIS_MODULE,
};

static int __init lrw_module_init(void)
{
	return crypto_register_template(&lrw_tmpl);
}

static void __exit lrw_module_exit(void)
{
	crypto_unregister_template(&lrw_tmpl);
}

subsys_initcall(lrw_module_init);
module_exit(lrw_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
MODULE_SOFTDEP("pre: ecb");