/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <crypto/utils.h>
#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		127
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15
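
/*
 * Illustrative sketch only (not part of this API): callers that need a
 * temporary buffer holding one block of an arbitrary cipher typically
 * overallocate and align by hand, along the lines of:
 *
 *	u8 buf[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 *	u8 *tmp = PTR_ALIGN(&buf[0], MAX_CIPHER_ALIGNMASK + 1);
 */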

#ifdef ARCH_DMA_MINALIGN
#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
#else
#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
#endif

#define CRYPTO_DMA_PADDING	((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
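
/*
 * Worked example (the values are illustrative and per-arch): with
 * CRYPTO_MINALIGN == 8 and ARCH_DMA_MINALIGN == 128, CRYPTO_DMA_PADDING is
 * (128 - 1) & ~(8 - 1) == 120, i.e. the extra bytes to reserve so that a
 * CRYPTO_MINALIGN-aligned context can always be advanced to a
 * CRYPTO_DMA_ALIGN boundary.
 */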

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
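
/*
 * Usage sketch ("aes" is only an example): a module implementing AES declares
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 *
 * which creates both the plain "aes" alias for userspace module loading and
 * the "crypto-aes" alias, so the crypto API can autoload the module via
 * request_module("crypto-%s", ...) without permitting arbitrary module names.
 */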

struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
struct scatterlist;
struct seq_file;
struct sk_buff;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);
#ifdef CONFIG_CRYPTO_STATS
	int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
#endif

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	struct work_struct free_work;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};
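
/*
 * Queue semantics, for reference: crypto_enqueue_request() returns
 * -EINPROGRESS when the request was queued normally.  Once qlen reaches
 * max_qlen, requests without CRYPTO_TFM_REQ_MAY_BACKLOG are rejected with
 * -ENOSPC, while requests that do set the flag are still queued and get
 * -EBUSY; ->backlog points at the first such backlogged entry (or back at
 * &queue->list when there is none, which is what crypto_get_backlog()
 * below checks for).
 */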

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
void crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
void crypto_unregister_algs(struct crypto_alg *algs, int count);

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
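
/*
 * For reference: crypto_inst_setname() builds the instance names in the
 * standard "template(algorithm)" form, e.g. wrapping "cbc" around an alg
 * whose cra_name is "aes" and cra_driver_name is "aes-generic" yields
 * "cbc(aes)" / "cbc(aes-generic)"; it returns -ENAMETOOLONG if either
 * result does not fit in CRYPTO_MAX_ALG_NAME.
 */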

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
				 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
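
/*
 * crypto_inc() treats @a as a @size-byte big-endian integer and increments
 * it by one in place; this is what counter-based modes such as CTR use to
 * step their counter block.
 */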
void crypto_inc(u8 *a, unsigned int size);

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
					 unsigned int align)
{
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}
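
/*
 * Note: the tfm context is already aligned to crypto_tfm_ctx_alignment()
 * (CRYPTO_MINALIGN, via CRYPTO_MINALIGN_ATTR on __ctx), so
 * crypto_tfm_ctx_align() only does work for stricter alignments; any weaker
 * request collapses to align == 1 and the pointer is returned unchanged.
 */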

static inline unsigned int crypto_dma_align(void)
{
	return CRYPTO_DMA_ALIGN;
}

static inline unsigned int crypto_dma_padding(void)
{
	return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}
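
/*
 * Example: a user who wants a synchronous algorithm passes type == 0 with
 * CRYPTO_ALG_ASYNC set in the mask; crypto_requires_off(algt, CRYPTO_ALG_ASYNC)
 * then returns CRYPTO_ALG_ASYNC.  In general it returns the subset of @off
 * flags that the request demands to be clear.
 */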

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

static inline void crypto_request_complete(struct crypto_async_request *req,
					   int err)
{
	req->complete(req->data, err);
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

#endif	/* _CRYPTO_ALGAPI_H */