// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
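
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for
 * start == 0x1ff0 and len == 32, start + len - 1 == 0x200f, so end_page
 * is 0x2000 and max() relocates the chunk to the start of the next page
 * instead of letting it straddle the boundary.  When the chunk already
 * fits in the current page, end_page <= start and start is returned
 * unchanged.
 */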

static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
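
/*
 * Minimal usage sketch (illustrative only; the loop shape is an
 * assumption, not taken from a real driver): a caller initialises the
 * walk from the request's scatterlists, then alternates between
 * processing walk.nbytes bytes and reporting back how many were left
 * unprocessed via ablkcipher_walk_done():
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (!err && walk.nbytes) {
 *		// process walk.nbytes bytes from walk.src to walk.dst
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 *
 * Passing 0 means every byte of the step was consumed; a positive value
 * would be the leftover count, and a negative one aborts the walk.
 */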

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
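
/*
 * Sizing example (illustrative): with bs == ivsize == 16 and
 * alignmask == 15, aligned_bs == 16 and size starts out as
 * 16 * 2 + 16 + max(16, 16) - 16 == 48 bytes, plus the extra slack added
 * for crypto_tfm_ctx_alignment().  The three ablkcipher_get_spot() calls
 * above then carve two bs-sized regions and one ivsize-sized region out
 * of that buffer, none of which straddles a page boundary.
 */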

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
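
/*
 * Alignment example (illustrative): with alignmask == 15 and kmalloc()
 * returning 0x1004, ALIGN(0x1004, 16) yields 0x1010, so the key is
 * copied into a 16-byte-aligned position before ->setkey() runs.  The
 * "keylen + alignmask" over-allocation guarantees the aligned copy
 * still fits inside the buffer.
 */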

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
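
/*
 * Registration sketch (illustrative; names and sizes are placeholders,
 * not a real driver): an implementation gets these ops installed by
 * registering a crypto_alg whose cra_type is crypto_ablkcipher_type:
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name	= "cbc(example)",
 *		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
 *				  CRYPTO_ALG_ASYNC,
 *		.cra_blocksize	= 16,
 *		.cra_type	= &crypto_ablkcipher_type,
 *		.cra_ablkcipher	= {
 *			.min_keysize	= 16,
 *			.max_keysize	= 32,
 *			.ivsize		= 16,
 *			.setkey		= example_setkey,
 *			.encrypt	= example_encrypt,
 *			.decrypt	= example_decrypt,
 *		},
 *	};
 *
 * crypto_register_alg(&example_alg) then routes user setkey/encrypt/
 * decrypt calls through the wrappers installed above.
 */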

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
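
/*
 * With CONFIG_PROC_FS enabled, crypto_ablkcipher_show() above renders
 * each registered algorithm in /proc/crypto roughly as follows (the
 * numbers are illustrative):
 *
 *	type         : ablkcipher
 *	async        : yes
 *	blocksize    : 16
 *	min keysize  : 16
 *	max keysize  : 32
 *	ivsize       : 16
 *	geniv        : <default>
 */
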
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

static const char *skcipher_default_geniv __read_mostly;

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	walk->iv = req->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

const char *crypto_default_geniv(const struct crypto_alg *alg)
{
	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize) !=
	    alg->cra_blocksize)
		return "chainiv";

	return alg->cra_flags & CRYPTO_ALG_ASYNC ?
	       "eseqiv" : skcipher_default_geniv;
}
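
/*
 * Illustrative outcomes: cbc(aes) has ivsize == blocksize == 16, so a
 * synchronous implementation gets skcipher_default_geniv ("chainiv" on
 * uniprocessor, "eseqiv" on SMP; see skcipher_module_init() below) and
 * an async one gets "eseqiv".  ctr(aes), whose ivsize (16) differs from
 * its block size (1), always falls back to "chainiv".
 */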

static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		geniv = alg->cra_blkcipher.geniv;
	else
		geniv = alg->cra_ablkcipher.geniv;

	if (!geniv)
		geniv = crypto_default_geniv(alg);

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		tmpl->free(inst);
		goto put_tmpl;
	}

	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}

static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
						 u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER)
		return alg;

	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize))
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER) {
		if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					     alg->cra_ablkcipher.ivsize));

	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	alg = crypto_lookup_skcipher(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_skcipher(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_ablkcipher_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
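
/*
 * Allocation sketch (illustrative; error handling abbreviated and the
 * callback my_done() is a placeholder): a typical user of this legacy
 * interface looked roughly like:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_ablkcipher_setkey(tfm, key, keylen);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_done, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *	// -EINPROGRESS or -EBUSY: my_done() reports completion later
 *
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */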

static int __init skcipher_module_init(void)
{
	skcipher_default_geniv = num_possible_cpus() > 1 ?
				 "eseqiv" : "chainiv";
	return 0;
}

static void skcipher_module_exit(void)
{
}

module_init(skcipher_module_init);
module_exit(skcipher_module_exit);