/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Symmetric key ciphers.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
#define _CRYPTO_INTERNAL_SKCIPHER_H

#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>

struct aead_request;
struct rtattr;

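/*
 * The layout below relies on a deliberate overlay: "head" pads the
 * anonymous struct so that "s.base" sits at exactly the same offset as
 * "alg.base".  The crypto core can therefore treat the instance as a
 * plain struct crypto_instance while skcipher code accesses the full
 * struct skcipher_alg through the same memory.
 */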
struct skcipher_instance {
	void (*free)(struct skcipher_instance *inst);
	union {
		struct {
			char head[offsetof(struct skcipher_alg, base)];
			struct crypto_instance base;
		} s;
		struct skcipher_alg alg;
	};
};

struct crypto_skcipher_spawn {
	struct crypto_spawn base;
};

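/*
 * State for walking a request's source and destination scatterlists.
 * Between skcipher_walk_virt()/skcipher_walk_done() steps,
 * src.virt.addr and dst.virt.addr point at mapped buffers holding the
 * next nbytes of data to process.
 */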
struct skcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			void *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	struct list_head buffers;

	u8 *page;
	u8 *buffer;
	u8 *oiv;
	void *iv;

	unsigned int ivsize;

	int flags;
	unsigned int blocksize;
	unsigned int stride;
	unsigned int alignmask;
};

static inline struct crypto_instance *skcipher_crypto_instance(
	struct skcipher_instance *inst)
{
	return &inst->s.base;
}

static inline struct skcipher_instance *skcipher_alg_instance(
	struct crypto_skcipher *skcipher)
{
	return container_of(crypto_skcipher_alg(skcipher),
			    struct skcipher_instance, alg);
}

static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
	return crypto_instance_ctx(skcipher_crypto_instance(inst));
}

static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}

static inline void crypto_set_skcipher_spawn(
	struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
{
	crypto_set_spawn(&spawn->base, inst);
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask);

static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}

static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return container_of(spawn->base.alg, struct skcipher_alg, base);
}

static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_skcipher_spawn_alg(spawn);
}

static inline struct crypto_skcipher *crypto_spawn_skcipher(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_spawn_tfm2(&spawn->base);
}

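/*
 * Illustrative sketch (hypothetical template, not part of this header):
 * a template typically wires up a spawn in its ->create() and later
 * instantiates the underlying skcipher from its tfm init hook:
 *
 *	spawn = skcipher_instance_ctx(inst);
 *	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
 *	err = crypto_grab_skcipher(spawn, cipher_name, 0, mask);
 *	...
 *	ctx->child = crypto_spawn_skcipher(spawn);
 */
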
static inline void crypto_skcipher_set_reqsize(
	struct crypto_skcipher *skcipher, unsigned int reqsize)
{
	skcipher->reqsize = reqsize;
}

int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst);

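/*
 * Illustrative sketch (hypothetical driver): standalone algorithms are
 * usually registered as an array from module init:
 *
 *	static struct skcipher_alg example_algs[] = { ... };
 *
 *	static int __init example_init(void)
 *	{
 *		return crypto_register_skciphers(example_algs,
 *						 ARRAY_SIZE(example_algs));
 *	}
 */
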
int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req,
		       bool atomic);
void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req);
int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
void skcipher_walk_complete(struct skcipher_walk *walk, int err);

static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
	skcipher_walk_done(walk, -ECANCELED);
}

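/*
 * Illustrative sketch (hypothetical cipher, not part of this header):
 * the canonical loop an implementation runs over the walk API.  Each
 * step maps a contiguous chunk; the implementation consumes it and
 * reports how many bytes it left unprocessed to skcipher_walk_done():
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		example_crypt_chunk(ctx, walk.dst.virt.addr,
 *				    walk.src.virt.addr, walk.nbytes);
 *		err = skcipher_walk_done(&walk, 0);
 *	}
 *	return err;
 */
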
static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
					       int err)
{
	req->base.complete(&req->base, err);
}

static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
{
	return req->base.flags;
}

static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
	return req->__ctx;
}

static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
	return req->base.flags;
}

static inline unsigned int crypto_skcipher_alg_min_keysize(
	struct skcipher_alg *alg)
{
	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return alg->base.cra_blkcipher.min_keysize;

	if (alg->base.cra_ablkcipher.encrypt)
		return alg->base.cra_ablkcipher.min_keysize;

	return alg->min_keysize;
}

static inline unsigned int crypto_skcipher_alg_max_keysize(
	struct skcipher_alg *alg)
{
	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return alg->base.cra_blkcipher.max_keysize;

	if (alg->base.cra_ablkcipher.encrypt)
		return alg->base.cra_ablkcipher.max_keysize;

	return alg->max_keysize;
}

static inline unsigned int crypto_skcipher_alg_chunksize(
	struct skcipher_alg *alg)
{
	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return alg->base.cra_blocksize;

	if (alg->base.cra_ablkcipher.encrypt)
		return alg->base.cra_blocksize;

	return alg->chunksize;
}

static inline unsigned int crypto_skcipher_alg_walksize(
	struct skcipher_alg *alg)
{
	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return alg->base.cra_blocksize;

	if (alg->base.cra_ablkcipher.encrypt)
		return alg->base.cra_blocksize;

	return alg->walksize;
}

/**
 * crypto_skcipher_chunksize() - obtain chunk size
 * @tfm: cipher handle
 *
 * The block size is set to one for ciphers such as CTR. However,
 * you still need to provide incremental updates in multiples of
 * the underlying block size as the IV does not have sub-block
 * granularity. This is known in this API as the chunk size.
 *
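 * For example, ctr(aes) reports a block size of one byte but a chunk
 * size of sixteen, matching the underlying AES block.
 *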
 * Return: chunk size in bytes
 */
static inline unsigned int crypto_skcipher_chunksize(
	struct crypto_skcipher *tfm)
{
	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
}

/**
 * crypto_skcipher_walksize() - obtain walk size
 * @tfm: cipher handle
 *
 * In some cases, algorithms can only perform optimally when operating on
 * multiple blocks in parallel. This is reflected by the walksize, which
 * must be a multiple of the chunksize (or equal to it if the concern does
 * not apply).
 *
 * Return: walk size in bytes
 */
static inline unsigned int crypto_skcipher_walksize(
	struct crypto_skcipher *tfm)
{
	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
}

/* Helpers for simple block cipher modes of operation */
struct skcipher_ctx_simple {
	struct crypto_cipher *cipher;	/* underlying block cipher */
};

static inline struct crypto_cipher *
skcipher_cipher_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	return ctx->cipher;
}

struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
			       struct crypto_alg **cipher_alg_ret);

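/*
 * Illustrative sketch (hypothetical mode, not part of this header): a
 * simple mode like CBC can implement ->create() almost entirely with the
 * helper above, filling in only its own encrypt/decrypt.  The helper
 * returns the underlying cipher algorithm with a reference held, so the
 * caller drops it when done:
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		struct crypto_alg *alg;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		crypto_mod_put(alg);
 *		return err;
 *	}
 */
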
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Symmetric key ciphers.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
#define _CRYPTO_INTERNAL_SKCIPHER_H

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>

/*
 * Set this if your algorithm is sync but needs a reqsize larger
 * than MAX_SYNC_SKCIPHER_REQSIZE.
 *
 * Reuse bit that is specific to hash algorithms.
 */
#define CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE CRYPTO_ALG_OPTIONAL_KEY

struct aead_request;
struct rtattr;

struct skcipher_instance {
	void (*free)(struct skcipher_instance *inst);
	union {
		struct {
			char head[offsetof(struct skcipher_alg, base)];
			struct crypto_instance base;
		} s;
		struct skcipher_alg alg;
	};
};

struct crypto_skcipher_spawn {
	struct crypto_spawn base;
};

struct skcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			void *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	struct list_head buffers;

	u8 *page;
	u8 *buffer;
	u8 *oiv;
	void *iv;

	unsigned int ivsize;

	int flags;
	unsigned int blocksize;
	unsigned int stride;
	unsigned int alignmask;
};

static inline struct crypto_instance *skcipher_crypto_instance(
	struct skcipher_instance *inst)
{
	return &inst->s.base;
}

static inline struct skcipher_instance *skcipher_alg_instance(
	struct crypto_skcipher *skcipher)
{
	return container_of(crypto_skcipher_alg(skcipher),
			    struct skcipher_instance, alg);
}

static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
	return crypto_instance_ctx(skcipher_crypto_instance(inst));
}

static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask);

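/*
 * Illustrative sketch (hypothetical template): with this signature the
 * parent instance is passed to the grab itself, so no separate
 * crypto_set_spawn() step is needed:
 *
 *	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
 *				   crypto_attr_alg_name(tb[1]), 0, mask);
 */
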
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}

static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return container_of(spawn->base.alg, struct skcipher_alg, base);
}

static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_skcipher_spawn_alg(spawn);
}

static inline struct crypto_skcipher *crypto_spawn_skcipher(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_spawn_tfm2(&spawn->base);
}

static inline void crypto_skcipher_set_reqsize(
	struct crypto_skcipher *skcipher, unsigned int reqsize)
{
	skcipher->reqsize = reqsize;
}

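/*
 * As above, but additionally reserves headroom so that
 * skcipher_request_ctx_dma() can align the request context up to the
 * DMA alignment without overrunning the allocation.
 */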
static inline void crypto_skcipher_set_reqsize_dma(
	struct crypto_skcipher *skcipher, unsigned int reqsize)
{
	reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
	skcipher->reqsize = reqsize;
}

int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst);

int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req,
		       bool atomic);
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
void skcipher_walk_complete(struct skcipher_walk *walk, int err);

static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
	skcipher_walk_done(walk, -ECANCELED);
}

static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
{
	return crypto_tfm_ctx_dma(&tfm->base);
}

static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
	return req->__ctx;
}

static inline void *skcipher_request_ctx_dma(struct skcipher_request *req)
{
	unsigned int align = crypto_dma_align();

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(skcipher_request_ctx(req), align);
}

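/*
 * Illustrative sketch (hypothetical driver): the _dma helpers are used
 * as a pair.  The tfm init hook reserves the aligned space and the
 * request handlers retrieve the aligned context:
 *
 *	static int example_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		crypto_skcipher_set_reqsize_dma(tfm,
 *						sizeof(struct example_req_ctx));
 *		return 0;
 *	}
 *
 *	static int example_encrypt(struct skcipher_request *req)
 *	{
 *		struct example_req_ctx *rctx = skcipher_request_ctx_dma(req);
 *		...
 *	}
 */
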
static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
	return req->base.flags;
}

static inline unsigned int crypto_skcipher_alg_min_keysize(
	struct skcipher_alg *alg)
{
	return alg->min_keysize;
}

static inline unsigned int crypto_skcipher_alg_max_keysize(
	struct skcipher_alg *alg)
{
	return alg->max_keysize;
}

static inline unsigned int crypto_skcipher_alg_walksize(
	struct skcipher_alg *alg)
{
	return alg->walksize;
}

/**
 * crypto_skcipher_walksize() - obtain walk size
 * @tfm: cipher handle
 *
 * In some cases, algorithms can only perform optimally when operating on
 * multiple blocks in parallel. This is reflected by the walksize, which
 * must be a multiple of the chunksize (or equal to it if the concern does
 * not apply).
 *
 * Return: walk size in bytes
 */
static inline unsigned int crypto_skcipher_walksize(
	struct crypto_skcipher *tfm)
{
	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
}

/* Helpers for simple block cipher modes of operation */
struct skcipher_ctx_simple {
	struct crypto_cipher *cipher;	/* underlying block cipher */
};

static inline struct crypto_cipher *
skcipher_cipher_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	return ctx->cipher;
}

struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb);

static inline struct crypto_alg *skcipher_ialg_simple(
	struct skcipher_instance *inst)
{
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);

	return crypto_spawn_cipher_alg(spawn);
}

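/*
 * Illustrative sketch (hypothetical mode): unlike the older variant,
 * the helper no longer returns the underlying cipher algorithm through
 * an output parameter; skcipher_ialg_simple() retrieves it instead:
 *
 *	inst = skcipher_alloc_instance_simple(tmpl, tb);
 *	if (IS_ERR(inst))
 *		return PTR_ERR(inst);
 *	alg = skcipher_ialg_simple(inst);
 *
 *	inst->alg.encrypt = example_encrypt;
 *	inst->alg.decrypt = example_decrypt;
 *
 *	err = skcipher_register_instance(tmpl, inst);
 *	if (err)
 *		inst->free(inst);
 */
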
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */