v5.9
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		63
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);
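
/*
 * Example (editor's sketch, not part of this header): a template is
 * typically registered from module init and unregistered on exit. The
 * "example" name and the create callback are hypothetical; module
 * boilerplate is elided.
 */
static int example_create(struct crypto_template *tmpl, struct rtattr **tb);

static struct crypto_template example_tmpl = {
	.name = "example",
	.create = example_create,
	.module = THIS_MODULE,
};

static int __init example_mod_init(void)
{
	return crypto_register_template(&example_tmpl);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_template(&example_tmpl);
}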

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
				 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
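
/*
 * Example (editor's sketch, not part of this header): a driver embeds a
 * crypto_queue, sizes it once, and feeds it asynchronous requests under
 * its own lock. The names and the queue depth of 50 are hypothetical.
 */
static struct crypto_queue example_queue;

static void example_queue_setup(void)
{
	crypto_init_queue(&example_queue, 50);
}

static int example_queue_submit(struct crypto_async_request *req)
{
	/*
	 * -EINPROGRESS when queued; -EBUSY when placed on the backlog
	 * (CRYPTO_TFM_REQ_MAY_BACKLOG set); -ENOSPC when the queue is full.
	 */
	return crypto_enqueue_request(&example_queue, req);
}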

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
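
/*
 * Example (editor's sketch, not part of this header): crypto_xor() works
 * in place, crypto_xor_cpy() writes to a third buffer. With a non-constant
 * size both fall through to __crypto_xor(). All names are hypothetical.
 */
static void example_apply_keystream(u8 *buf, const u8 *keystream,
				    u8 *out, const u8 *in, unsigned int len)
{
	/* In place: buf ^= keystream. */
	crypto_xor(buf, keystream, len);

	/* Out of place: out = in ^ keystream, leaving 'in' untouched. */
	crypto_xor_cpy(out, in, keystream, len);
}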

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

struct crypto_cipher_spawn {
	struct crypto_spawn base;
};

static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
				     struct crypto_instance *inst,
				     const char *name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}

static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}

static inline struct crypto_alg *crypto_spawn_cipher_alg(
	struct crypto_cipher_spawn *spawn)
{
	return spawn->base.alg;
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_cipher_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}
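
/*
 * Example (editor's sketch, not part of this header): a template instance
 * usually keeps the spawn in its context, grabs the underlying cipher from
 * its ->create() callback and drops it in ->free(). The context layout and
 * names are hypothetical.
 */
struct example_inst_ctx {
	struct crypto_cipher_spawn spawn;
};

static int example_grab(struct crypto_instance *inst,
			const char *cipher_name, u32 mask)
{
	struct example_inst_ctx *ctx = crypto_instance_ctx(inst);

	return crypto_grab_cipher(&ctx->spawn, inst, cipher_name, 0, mask);
}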

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}
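
/*
 * Example (editor's sketch, not part of this header): the usual dequeue
 * pattern in a driver's processing thread. A backlogged request must be
 * notified with -EINPROGRESS before the next one is handled; locking
 * around the queue is elided.
 */
static void example_process_one(struct crypto_queue *queue)
{
	struct crypto_async_request *backlog, *req;

	backlog = crypto_get_backlog(queue);
	req = crypto_dequeue_request(queue);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (req) {
		/* ... perform the request, then report the result ... */
		req->complete(req, 0);
	}
}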

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
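
/*
 * Example (editor's sketch, not part of this header): how a ->create()
 * callback typically derives the mask passed to crypto_grab_*() so that a
 * user's request to clear inherited flags is honored. Only the algapi
 * helpers are real; the rest is hypothetical.
 */
static u32 example_get_mask(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return 0;

	/* Pass this mask, with type = 0, to crypto_grab_spawn(). */
	return crypto_algt_inherited_mask(algt);
}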

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
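
/*
 * Example (editor's sketch, not part of this header): verifying an
 * authentication tag with crypto_memneq() instead of memcmp(), so the
 * comparison time does not depend on where the buffers first differ.
 */
static int example_verify_tag(const u8 *computed, const u8 *received,
			      size_t taglen)
{
	return crypto_memneq(computed, received, taglen) ? -EBADMSG : 0;
}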

static inline void crypto_yield(u32 flags)
{
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
}

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

#endif	/* _CRYPTO_ALGAPI_H */
v4.17
 
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;
	unsigned int ivsize;

	int flags;
	unsigned int walk_blocksize;
	unsigned int cipher_blocksize;
	unsigned int alignmask;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk	in;
	unsigned int		nbytes;
	struct scatter_walk	out;
	unsigned int		total;
	struct list_head	buffers;
	u8			*iv_buffer;
	u8			*iv;
	int			flags;
	unsigned int		blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}
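
/*
 * Example (editor's sketch, not part of this header): in this version a
 * template's alloc path attaches a spawn to the new instance with
 * crypto_init_spawn() and releases it again with crypto_drop_spawn().
 * The context layout and names are hypothetical.
 */
static int example_attach(struct crypto_instance *inst,
			  struct crypto_alg *alg)
{
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);

	return crypto_init_spawn(spawn, alg, inst,
				 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
}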

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
			     unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
					      struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}
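
/*
 * Example (editor's sketch, not part of this header): the classic
 * blkcipher_walk loop used by ->encrypt() handlers of the (since removed)
 * blkcipher interface; the per-chunk processing is elided. Passing 0 to
 * blkcipher_walk_done() means the whole chunk was consumed.
 */
static int example_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		/* ... process walk.src.virt.addr into walk.dst.virt.addr ... */
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}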

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
					  struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}
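
/*
 * Example (editor's sketch, not part of this header): a template that can
 * only wrap synchronous algorithms folds crypto_requires_sync() into the
 * lookup mask, so an async-only implementation is never selected. Names
 * other than the algapi helpers are hypothetical.
 */
static struct crypto_alg *example_lookup(struct rtattr **tb, u32 type,
					 u32 mask)
{
	mask |= crypto_requires_sync(type, mask);

	return crypto_get_attr_alg(tb, type, mask);
}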

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}

static inline void crypto_yield(u32 flags)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
#endif
}

#endif	/* _CRYPTO_ALGAPI_H */