/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _CRYPTO_ECB_CBC_HELPER_H
#define _CRYPTO_ECB_CBC_HELPER_H

#include <crypto/internal/skcipher.h>
#include <asm/fpu/api.h>

/*
 * Mode helpers to instantiate parameterized skcipher ECB/CBC modes without
 * having to rely on indirect calls and retpolines.
 */

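/*
 * Example usage (a minimal sketch: mycipher_encrypt() and the 16-byte block
 * size are hypothetical stand-ins, not part of this header, and fpu_blocks
 * is -1 because this imaginary cipher has no FPU-accelerated path):
 *
 *	static int ecb_encrypt(struct skcipher_request *req)
 *	{
 *		ECB_WALK_START(req, 16, -1);
 *		ECB_BLOCK(1, mycipher_encrypt);
 *		ECB_WALK_END();
 *	}
 *
 *	static int cbc_encrypt(struct skcipher_request *req)
 *	{
 *		CBC_WALK_START(req, 16, -1);
 *		CBC_ENC_BLOCK(mycipher_encrypt);
 *		CBC_WALK_END();
 *	}
 */

/*
 * Open the walk and the per-step loop.  bsize is the cipher block size in
 * bytes; fpu_blocks is the minimum number of blocks per step for which the
 * FPU-accelerated path pays off, or -1 if the cipher never uses the FPU.
 * The braces left open here are closed by ECB_WALK_END().
 */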
#define ECB_WALK_START(req, bsize, fpu_blocks) do {			\
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));	\
	const int __fpu_blocks = (fpu_blocks);				\
	const int __bsize = (bsize);					\
	struct skcipher_walk walk;					\
	int err = skcipher_walk_virt(&walk, (req), false);		\
	while (walk.nbytes > 0) {					\
		unsigned int nbytes = walk.nbytes;			\
		bool do_fpu = __fpu_blocks != -1 &&			\
			      nbytes >= __fpu_blocks * __bsize;		\
		const u8 *src = walk.src.virt.addr;			\
		u8 *dst = walk.dst.virt.addr;				\
		u8 __maybe_unused buf[(bsize)];				\
		if (do_fpu) kernel_fpu_begin()

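/* CBC reuses the same walk setup as ECB. */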
#define CBC_WALK_START(req, bsize, fpu_blocks)				\
	ECB_WALK_START(req, bsize, fpu_blocks)

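/*
 * Advance the source and destination pointers and the remaining byte count
 * by 'blocks' full cipher blocks.
 */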
#define ECB_WALK_ADVANCE(blocks) do {					\
	dst += (blocks) * __bsize;					\
	src += (blocks) * __bsize;					\
	nbytes -= (blocks) * __bsize;					\
} while (0)

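/*
 * Consume runs of 'blocks' blocks with func(ctx, dst, src) while enough
 * input remains in this walk step.  If this run width is below the FPU
 * threshold, leave the FPU context first so the scalar tail does not run
 * inside the FPU section needlessly.
 */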
#define ECB_BLOCK(blocks, func) do {					\
	const int __blocks = (blocks);					\
	if (do_fpu && __blocks < __fpu_blocks) {			\
		kernel_fpu_end();					\
		do_fpu = false;						\
	}								\
	while (nbytes >= __blocks * __bsize) {				\
		(func)(ctx, dst, src);					\
		ECB_WALK_ADVANCE(blocks);				\
	}								\
} while (0)

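/*
 * CBC encryption is inherently serial, as each block's input depends on the
 * previous ciphertext block, so only a single-block func is usable: XOR the
 * chaining value into the plaintext, encrypt in place, then save the final
 * ciphertext block as the IV for the next walk step.
 */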
#define CBC_ENC_BLOCK(func) do {					\
	const u8 *__iv = walk.iv;					\
	while (nbytes >= __bsize) {					\
		crypto_xor_cpy(dst, src, __iv, __bsize);		\
		(func)(ctx, dst, dst);					\
		__iv = dst;						\
		ECB_WALK_ADVANCE(1);					\
	}								\
	memcpy(walk.iv, __iv, __bsize);					\
} while (0)

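/*
 * CBC decryption can process multiple blocks at once.  The macro XORs only
 * the first block of a run with the previous IV, so a multi-block func must
 * apply the remaining chaining XORs internally.  The last ciphertext block
 * of the run becomes the next IV; for in-place operation it is saved to a
 * stack buffer before decryption overwrites it.  As in ECB_BLOCK, the FPU
 * context is left once the run width drops below the threshold.
 */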
#define CBC_DEC_BLOCK(blocks, func) do {				\
	const int __blocks = (blocks);					\
	if (do_fpu && __blocks < __fpu_blocks) {			\
		kernel_fpu_end();					\
		do_fpu = false;						\
	}								\
	while (nbytes >= __blocks * __bsize) {				\
		const u8 *__iv = src + ((blocks) - 1) * __bsize;	\
		if (dst == src)						\
			__iv = memcpy(buf, __iv, __bsize);		\
		(func)(ctx, dst, src);					\
		crypto_xor(dst, walk.iv, __bsize);			\
		memcpy(walk.iv, __iv, __bsize);				\
		ECB_WALK_ADVANCE(blocks);				\
	}								\
} while (0)

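/*
 * Close the loop opened by ECB_WALK_START(): leave the FPU context if still
 * held, hand the unprocessed tail back to skcipher_walk_done(), and return
 * the final error code.
 */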
#define ECB_WALK_END()							\
		if (do_fpu) kernel_fpu_end();				\
		err = skcipher_walk_done(&walk, nbytes);		\
	}								\
	return err;							\
} while (0)

#define CBC_WALK_END() ECB_WALK_END()

#endif