/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>

struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
        unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
        unsigned int (*extsize)(struct crypto_alg *alg);
        int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
        int (*init_tfm)(struct crypto_tfm *tfm);
        void (*show)(struct seq_file *m, struct crypto_alg *alg);
        struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);

        unsigned int type;
        unsigned int maskclear;
        unsigned int maskset;
        unsigned int tfmsize;
};

struct crypto_instance {
        struct crypto_alg alg;

        struct crypto_template *tmpl;
        struct hlist_node list;

        void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
        struct list_head list;
        struct hlist_head instances;
        struct module *module;

        struct crypto_instance *(*alloc)(struct rtattr **tb);
        void (*free)(struct crypto_instance *inst);
        int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

        char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
        struct list_head list;
        struct crypto_alg *alg;
        struct crypto_instance *inst;
        const struct crypto_type *frontend;
        u32 mask;
};

struct crypto_queue {
        struct list_head list;
        struct list_head *backlog;

        unsigned int qlen;
        unsigned int max_qlen;
};

struct scatter_walk {
        struct scatterlist *sg;
        unsigned int offset;
};

struct blkcipher_walk {
        union {
                struct {
                        struct page *page;
                        unsigned long offset;
                } phys;

                struct {
                        u8 *page;
                        u8 *addr;
                } virt;
        } src, dst;

        struct scatter_walk in;
        unsigned int nbytes;

        struct scatter_walk out;
        unsigned int total;

        void *page;
        u8 *buffer;
        u8 *iv;

        int flags;
        unsigned int blocksize;
};

struct ablkcipher_walk {
        struct {
                struct page *page;
                unsigned int offset;
        } src, dst;

        struct scatter_walk in;
        unsigned int nbytes;
        struct scatter_walk out;
        unsigned int total;
        struct list_head buffers;
        u8 *iv_buffer;
        u8 *iv;
        int flags;
        unsigned int blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
                             struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
                      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
                       struct crypto_instance *inst,
                       const struct crypto_type *frontend);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
                                    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
                                    struct crypto_instance *inst)
{
        spawn->inst = inst;
}

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
                                    const struct crypto_type *frontend,
                                    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
                                                 u32 type, u32 mask)
{
        return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
                             unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
                                              struct crypto_alg *alg);
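
/*
 * Example: a minimal old-style template ->alloc callback, modeled loosely
 * on crypto/ecb.c from kernels of this era. The "example" name and the
 * ECB-style type choices are illustrative only, and error handling is
 * abridged:
 *
 *      static struct crypto_instance *example_alloc(struct rtattr **tb)
 *      {
 *              struct crypto_instance *inst;
 *              struct crypto_alg *alg;
 *              int err;
 *
 *              err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
 *              if (err)
 *                      return ERR_PTR(err);
 *
 *              alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
 *                                        CRYPTO_ALG_TYPE_MASK);
 *              if (IS_ERR(alg))
 *                      return ERR_CAST(alg);
 *
 *              inst = crypto_alloc_instance("example", alg);
 *              crypto_mod_put(alg);
 *              return inst;
 *      }
 */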

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);

/* These functions require the input/output to be aligned as u32. */
void crypto_inc(u8 *a, unsigned int size);
void crypto_xor(u8 *dst, const u8 *src, unsigned int size);

int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize);

int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
        return PTR_ALIGN(crypto_tfm_ctx(tfm),
                         crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
        struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
        return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
        struct crypto_ablkcipher *tfm)
{
        return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
        return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
}

static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
{
        return crypto_tfm_ctx(&tfm->base);
}

static inline struct crypto_instance *crypto_aead_alg_instance(
        struct crypto_aead *aead)
{
        return crypto_tfm_alg_instance(&aead->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
        struct crypto_spawn *spawn)
{
        u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;

        return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
        return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
        return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
        struct crypto_spawn *spawn)
{
        u32 type = CRYPTO_ALG_TYPE_CIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;

        return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
        return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
{
        u32 type = CRYPTO_ALG_TYPE_HASH;
        u32 mask = CRYPTO_ALG_TYPE_HASH_MASK;

        return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_hash_ctx(struct crypto_hash *tfm)
{
        return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
{
        return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
                                       struct scatterlist *dst,
                                       struct scatterlist *src,
                                       unsigned int nbytes)
{
        walk->in.sg = src;
        walk->out.sg = dst;
        walk->total = nbytes;
}
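
/*
 * Example: the canonical walk loop used by blkcipher implementations,
 * sketched here for illustration. The per-block processing is elided and
 * the 16-byte block size is a hypothetical choice:
 *
 *      static int example_encrypt(struct blkcipher_desc *desc,
 *                                 struct scatterlist *dst,
 *                                 struct scatterlist *src,
 *                                 unsigned int nbytes)
 *      {
 *              const unsigned int bsize = 16;  // hypothetical block size
 *              struct blkcipher_walk walk;
 *              int err;
 *
 *              blkcipher_walk_init(&walk, dst, src, nbytes);
 *              err = blkcipher_walk_virt(desc, &walk);
 *
 *              while (walk.nbytes) {
 *                      // encrypt walk.src.virt.addr into
 *                      // walk.dst.virt.addr, one bsize block at a time
 *                      err = blkcipher_walk_done(desc, &walk,
 *                                                walk.nbytes % bsize);
 *              }
 *              return err;
 *      }
 */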

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
                                        struct scatterlist *dst,
                                        struct scatterlist *src,
                                        unsigned int nbytes)
{
        walk->in.sg = src;
        walk->out.sg = dst;
        walk->total = nbytes;
        INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        if (unlikely(!list_empty(&walk->buffers)))
                __ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
        struct crypto_queue *queue)
{
        return queue->backlog == &queue->list ? NULL :
               container_of(queue->backlog, struct crypto_async_request, list);
}

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
                                             struct ablkcipher_request *request)
{
        return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
        struct crypto_queue *queue)
{
        return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
        return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
                                          struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline void *aead_request_ctx(struct aead_request *req)
{
        return req->__ctx;
}

static inline void aead_request_complete(struct aead_request *req, int err)
{
        req->base.complete(&req->base, err);
}

static inline u32 aead_request_flags(struct aead_request *req)
{
        return req->base.flags;
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
                                                     u32 type, u32 mask)
{
        return crypto_attr_alg(tb[1], type, mask);
}

/*
 * Returns CRYPTO_ALG_ASYNC if the type/mask combination requires a
 * synchronous algorithm, i.e. the caller asked for the CRYPTO_ALG_ASYNC
 * bit to be clear. Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
        return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
}
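
/*
 * For instance (an illustrative sketch, following the pattern in templates
 * such as crypto/authenc.c), a template's ->alloc can fold a user's request
 * for a synchronous implementation into the lookup mask for the underlying
 * algorithm:
 *
 *      algt = crypto_get_attr_type(tb);
 *      mask = CRYPTO_ALG_TYPE_MASK |
 *             crypto_requires_sync(algt->type, algt->mask);
 */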

#endif  /* _CRYPTO_ALGAPI_H */

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE            160
#define MAX_ALGAPI_ALIGNMASK            63
#define MAX_CIPHER_BLOCKSIZE            16
#define MAX_CIPHER_ALIGNMASK            15
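
/*
 * These bounds allow, for example (illustrative), a stack buffer that can
 * hold one block of any simple cipher at any alignment:
 *
 *      u8 buf[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 */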

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
        unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
        unsigned int (*extsize)(struct crypto_alg *alg);
        int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
        int (*init_tfm)(struct crypto_tfm *tfm);
        void (*show)(struct seq_file *m, struct crypto_alg *alg);
        int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
        void (*free)(struct crypto_instance *inst);

        unsigned int type;
        unsigned int maskclear;
        unsigned int maskset;
        unsigned int tfmsize;
};

struct crypto_instance {
        struct crypto_alg alg;

        struct crypto_template *tmpl;

        union {
                /* Node in list of instances after registration. */
                struct hlist_node list;
                /* List of attached spawns before registration. */
                struct crypto_spawn *spawns;
        };

        void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
        struct list_head list;
        struct hlist_head instances;
        struct module *module;

        int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

        char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
        struct list_head list;
        struct crypto_alg *alg;
        union {
                /* Back pointer to instance after registration. */
                struct crypto_instance *inst;
                /* Spawn list pointer prior to registration. */
                struct crypto_spawn *next;
        };
        const struct crypto_type *frontend;
        u32 mask;
        bool dead;
        bool registered;
};

struct crypto_queue {
        struct list_head list;
        struct list_head *backlog;

        unsigned int qlen;
        unsigned int max_qlen;
};

struct scatter_walk {
        struct scatterlist *sg;
        unsigned int offset;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);
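
/*
 * Example: registering a template from module init, sketched under the
 * assumption of a hypothetical "example" template whose ->create callback
 * builds instances from the rtattr parameters:
 *
 *      static struct crypto_template example_tmpl = {
 *              .name = "example",
 *              .create = example_create,
 *              .module = THIS_MODULE,
 *      };
 *
 *      static int __init example_module_init(void)
 *      {
 *              return crypto_register_template(&example_tmpl);
 *      }
 *
 *      static void __exit example_module_exit(void)
 *      {
 *              crypto_unregister_template(&example_tmpl);
 *      }
 */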

int crypto_register_instance(struct crypto_template *tmpl,
                             struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
                      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
                                    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
                        struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
                                 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
        return queue->qlen;
}
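
/*
 * Example: the dequeue pattern typically used by driver kthreads or
 * tasklets, sketched for illustration (locking elided). A request that was
 * accepted as backlog must be signalled with -EINPROGRESS before the next
 * request is processed; crypto_get_backlog() is defined further below:
 *
 *      struct crypto_async_request *req, *backlog;
 *
 *      backlog = crypto_get_backlog(&queue);
 *      req = crypto_dequeue_request(&queue);
 *      if (backlog)
 *              backlog->complete(backlog, -EINPROGRESS);
 *      if (req) {
 *              // process req, then call req->complete(req, err)
 *      }
 */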

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
        if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
            __builtin_constant_p(size) &&
            (size % sizeof(unsigned long)) == 0) {
                unsigned long *d = (unsigned long *)dst;
                unsigned long *s = (unsigned long *)src;

                while (size > 0) {
                        *d++ ^= *s++;
                        size -= sizeof(unsigned long);
                }
        } else {
                __crypto_xor(dst, dst, src, size);
        }
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
                                  unsigned int size)
{
        if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
            __builtin_constant_p(size) &&
            (size % sizeof(unsigned long)) == 0) {
                unsigned long *d = (unsigned long *)dst;
                unsigned long *s1 = (unsigned long *)src1;
                unsigned long *s2 = (unsigned long *)src2;

                while (size > 0) {
                        *d++ = *s1++ ^ *s2++;
                        size -= sizeof(unsigned long);
                }
        } else {
                __crypto_xor(dst, src1, src2, size);
        }
}
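
/*
 * Example: a CTR-style keystream application, shown for illustration
 * ("ctrblk" and "keystream" are hypothetical 16-byte buffers):
 *
 *      crypto_xor_cpy(dst, src, keystream, 16);  // dst = src ^ keystream
 *      crypto_inc(ctrblk, 16);                   // bump big-endian counter
 */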

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
        return PTR_ALIGN(crypto_tfm_ctx(tfm),
                         crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
        struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
        return inst->__ctx;
}

struct crypto_cipher_spawn {
        struct crypto_spawn base;
};

static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
                                     struct crypto_instance *inst,
                                     const char *name, u32 type, u32 mask)
{
        type &= ~CRYPTO_ALG_TYPE_MASK;
        type |= CRYPTO_ALG_TYPE_CIPHER;
        mask |= CRYPTO_ALG_TYPE_MASK;
        return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}

static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
{
        crypto_drop_spawn(&spawn->base);
}

static inline struct crypto_alg *crypto_spawn_cipher_alg(
        struct crypto_cipher_spawn *spawn)
{
        return spawn->base.alg;
}

static inline struct crypto_cipher *crypto_spawn_cipher(
        struct crypto_cipher_spawn *spawn)
{
        u32 type = CRYPTO_ALG_TYPE_CIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;

        return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
}
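
/*
 * Example: how a template typically wires up a cipher spawn, sketched with
 * a hypothetical instance context holding a single spawn. The spawn is
 * grabbed in ->create and instantiated in the instance's tfm init callback:
 *
 *      struct example_instance_ctx {
 *              struct crypto_cipher_spawn spawn;
 *      };
 *
 *      // in ->create, with ctx = crypto_instance_ctx(inst):
 *      err = crypto_grab_cipher(&ctx->spawn, inst,
 *                               crypto_attr_alg_name(tb[1]), 0, mask);
 *
 *      // in the instance's tfm init callback:
 *      struct crypto_cipher *cipher = crypto_spawn_cipher(&ctx->spawn);
 *      if (IS_ERR(cipher))
 *              return PTR_ERR(cipher);
 */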

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
        return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline struct crypto_async_request *crypto_get_backlog(
        struct crypto_queue *queue)
{
        return queue->backlog == &queue->list ? NULL :
               container_of(queue->backlog, struct crypto_async_request, list);
}

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
        return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS      \
        (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |  \
         CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
        return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
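
/*
 * Example: the usual ->create preamble that honors inherited-flag requests,
 * sketched for illustration:
 *
 *      struct crypto_attr_type *algt;
 *      u32 mask;
 *
 *      algt = crypto_get_attr_type(tb);
 *      if (IS_ERR(algt))
 *              return PTR_ERR(algt);
 *      mask = crypto_algt_inherited_mask(algt);
 *      // then: crypto_grab_*(..., name, 0, mask);
 */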

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *                 timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
        return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
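
/*
 * Example: constant-time authentication tag comparison during AEAD
 * decryption (illustrative; "calculated" and "received" are hypothetical
 * buffers of "authsize" bytes):
 *
 *      if (crypto_memneq(calculated, received, authsize))
 *              return -EBADMSG;
 */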

static inline void crypto_yield(u32 flags)
{
        if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                cond_resched();
}

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
        CRYPTO_MSG_ALG_REQUEST,
        CRYPTO_MSG_ALG_REGISTER,
        CRYPTO_MSG_ALG_LOADED,
};

#endif  /* _CRYPTO_ALGAPI_H */