v5.4: include/crypto/algapi.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		63
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15

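/*
 * Usage sketch (illustrative, not part of this header): these maxima let
 * callers reserve static or stack space that fits any algorithm, then
 * align it at run time.  'tfm' is assumed to be an existing transform:
 *
 *	u8 raw[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 *	u8 *block = PTR_ALIGN(raw, crypto_tfm_alg_alignmask(tfm) + 1);
 */
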
struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

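/*
 * Registration sketch (illustrative; 'example_create' and 'example_tmpl'
 * are hypothetical names): a template supplies ->create(), which parses
 * the rtattr arguments, builds a crypto_instance around the inner
 * algorithm and calls crypto_register_instance():
 *
 *	static struct crypto_template example_tmpl = {
 *		.name	= "example",
 *		.create	= example_create,
 *		.module	= THIS_MODULE,
 *	};
 *
 * Module init/exit then call crypto_register_template(&example_tmpl) and
 * crypto_unregister_template(&example_tmpl).
 */
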
struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;
	unsigned int ivsize;

	int flags;
	unsigned int walk_blocksize;
	unsigned int cipher_blocksize;
	unsigned int alignmask;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk	in;
	unsigned int		nbytes;
	struct scatter_walk	out;
	unsigned int		total;
	struct list_head	buffers;
	u8			*iv_buffer;
	u8			*iv;
	int			flags;
	unsigned int		blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
			    unsigned int head);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}

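/*
 * Queue usage sketch (illustrative; the driver names are assumed): a
 * driver bounds its backlog with crypto_init_queue() and moves requests
 * through the queue from its submission and completion paths:
 *
 *	static struct crypto_queue example_queue;
 *
 *	crypto_init_queue(&example_queue, 32);
 *	err = crypto_enqueue_request(&example_queue, &req->base);
 *	async_req = crypto_dequeue_request(&example_queue);
 *
 * Requests beyond max_qlen are accepted only when the submitter set
 * CRYPTO_TFM_REQ_MAY_BACKLOG, in which case -EBUSY signals backlogging;
 * otherwise crypto_enqueue_request() fails with -ENOSPC.
 */
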
void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}

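/*
 * XOR usage sketch (illustrative): stream-cipher style code applies a
 * keystream in place with crypto_xor() or combines two buffers into a
 * third with crypto_xor_cpy().  A constant size that is a multiple of
 * sizeof(unsigned long) takes the word-at-a-time fast path above:
 *
 *	crypto_xor(data, keystream, 16);
 *	crypto_xor_cpy(dst, src, keystream, 16);
 */
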
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}

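/*
 * Comparison sketch (illustrative; the tag variables are assumed): use
 * crypto_memneq() instead of memcmp() when verifying authentication
 * tags, so the time taken does not depend on where the buffers differ:
 *
 *	if (crypto_memneq(computed_tag, received_tag, taglen))
 *		return -EBADMSG;
 */
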
static inline void crypto_yield(u32 flags)
{
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
}

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

#endif	/* _CRYPTO_ALGAPI_H */
v6.8: include/crypto/algapi.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <crypto/utils.h>
#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		127
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15

#ifdef ARCH_DMA_MINALIGN
#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
#else
#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
#endif

#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so retains those aliases as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)

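/*
 * Alias sketch (illustrative): an algorithm module declares both the
 * prefixed and unprefixed aliases with one line, so kernel-internal
 * "crypto-aes" requests and userspace "aes" requests both load it:
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 */
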
struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
struct scatterlist;
struct seq_file;
struct sk_buff;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);
#ifdef CONFIG_CRYPTO_STATS
	int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
#endif

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	struct work_struct free_work;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration.*/
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
void crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
void crypto_unregister_algs(struct crypto_alg *algs, int count);

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

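/*
 * Spawn usage sketch (illustrative; 'inst', 'name' and 'mask' are
 * assumed to come from the template's ->create()): the inner algorithm
 * is grabbed by name and held via the spawn:
 *
 *	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
 *	int err = crypto_grab_spawn(spawn, inst, name, 0, mask);
 *
 *	if (err)
 *		return err;
 *
 * Any later error path releases the reference with
 * crypto_drop_spawn(spawn).
 */
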
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
				 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}

void crypto_inc(u8 *a, unsigned int size);

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
					 unsigned int align)
{
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}

static inline unsigned int crypto_dma_align(void)
{
	return CRYPTO_DMA_ALIGN;
}

static inline unsigned int crypto_dma_padding(void)
{
	return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}

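/*
 * DMA context sketch (illustrative; 'struct example_ctx' is
 * hypothetical): a driver whose device DMAs into the transform context
 * adds CRYPTO_DMA_PADDING to its cra_ctxsize and fetches a pointer
 * aligned for DMA at run time:
 *
 *	struct example_ctx *ctx = crypto_tfm_ctx_dma(tfm);
 *
 * The crypto_dma_padding() bytes reserved in the context guarantee the
 * alignment reported by crypto_dma_align().
 */
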
static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}

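/*
 * Inheritance sketch (illustrative; 'tb' comes from the template's
 * ->create()): compute the inherited mask once and pass it to
 * crypto_grab_spawn() with type 0, so a user request that the instance
 * be synchronous is honored:
 *
 *	struct crypto_attr_type *algt = crypto_get_attr_type(tb);
 *	u32 mask;
 *
 *	if (IS_ERR(algt))
 *		return PTR_ERR(algt);
 *	mask = crypto_algt_inherited_mask(algt);
 */
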
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

static inline void crypto_request_complete(struct crypto_async_request *req,
					   int err)
{
	req->complete(req->data, err);
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

#endif	/* _CRYPTO_ALGAPI_H */