// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
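
/*
 * Worked example (added for illustration): with 4 KiB pages, if start sits
 * 8 bytes before a page boundary and len == 16, the last byte would land
 * 8 bytes into the next page; end_page then points at that boundary and
 * max() bumps the spot to the start of the next page.  If the len bytes
 * already fit within start's page, end_page <= start and start is returned
 * unchanged.
 */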

static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}
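
/*
 * Note (added for clarity): the bounce buffer set up above is queued on
 * walk->buffers by ablkcipher_queue_write() with the current output
 * position as its target.  Whatever the cipher writes into it is only
 * copied out to the real destination scatterlist when the driver calls
 * __ablkcipher_walk_complete(), which may happen later, from an
 * asynchronous completion callback.
 */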

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
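
/*
 * Note (added for clarity): the three ablkcipher_get_spot() steps above
 * skip two aligned block-size regions and then place the IV itself so that
 * none of these areas straddles a page boundary; the caller's IV bytes are
 * finally copied into the aligned buffer and walk->iv repointed at it.
 */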

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
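
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * roughly how a driver that works on physical pages would drive the walker
 * exported above.  example_encrypt() and example_dev_process_chunk() are
 * hypothetical names; only the ablkcipher_walk_* calls are the real API
 * from this file and <crypto/internal/skcipher.h>.
 */
#if 0
static void example_dev_process_chunk(struct ablkcipher_walk *walk);

static int example_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_walk walk;
	int err;

	/* Point the walker at the request's source/destination lists. */
	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, &walk);

	while (!err && walk.nbytes) {
		/*
		 * Consume walk.nbytes bytes from walk.src.page at
		 * walk.src.offset into walk.dst.page at walk.dst.offset.
		 * The third argument to ablkcipher_walk_done() is the
		 * number of bytes left unprocessed (0 == all done).
		 */
		example_dev_process_chunk(&walk);
		err = ablkcipher_walk_done(req, &walk, 0);
	}

	/* Flush bounce buffers queued by the unaligned slow path. */
	__ablkcipher_walk_complete(&walk);
	return err;
}
#endif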

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
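
/*
 * Worked example (added for illustration): with alignmask == 15 and a
 * 32-byte key, absize == 47.  ALIGN() skips at most 15 bytes of the
 * kmalloc'd buffer, so the 32 key bytes always fit in the remaining
 * 47 - 15 == 32 bytes, now at a 16-byte-aligned address.
 */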

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
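
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * how a driver might have registered an algorithm against
 * crypto_ablkcipher_type.  The "example_*" callbacks and context struct are
 * hypothetical, and the AES_* constants assume <crypto/aes.h>; the field
 * names and flags are the real historical ablkcipher interface.
 */
#if 0
static struct crypto_alg example_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-example",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct example_ctx),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= example_setkey,	/* hypothetical */
		.encrypt	= example_encrypt,	/* hypothetical */
		.decrypt	= example_decrypt,	/* hypothetical */
	},
};

/* Registered from module init with crypto_register_alg(&example_alg). */
#endif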

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
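
/*
 * Note (added for clarity): a givcipher is an ablkcipher with a built-in
 * IV generator.  When the instance carries CRYPTO_ALG_GENIV, the generator
 * template supplies its own setkey(), so it is used directly instead of
 * the generic length-checking wrapper above.
 */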

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);
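
/*
 * Note (added for context): drivers did not register against
 * crypto_givcipher_type directly; IV-generator templates such as
 * "chainiv" and "eseqiv" wrapped an existing ablkcipher and registered
 * the resulting instances with this type.
 */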