v6.8
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <crypto/utils.h>
#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		127
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15
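
/*
 * Example (editor's sketch, not part of the upstream header): the maxima
 * above let a caller reserve a single worst-case buffer and align it by
 * hand for whichever algorithm is in use, e.g. for a single cipher block,
 * where alignmask comes from crypto_tfm_alg_alignmask():
 *
 *	u8 buf[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 *	u8 *block = PTR_ALIGN(buf, alignmask + 1);
 */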

#ifdef ARCH_DMA_MINALIGN
#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
#else
#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
#endif

#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. the avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
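
/*
 * Usage sketch (editor's addition): a module implementing "aes" would
 * declare
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 *
 * so that both request_module("crypto-aes") and a legacy, unprefixed
 * request_module("aes") resolve to it.
 */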

struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
struct scatterlist;
struct seq_file;
struct sk_buff;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);
#ifdef CONFIG_CRYPTO_STATS
	int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
#endif

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	struct work_struct free_work;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
void crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
void crypto_unregister_algs(struct crypto_alg *algs, int count);
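
/*
 * Registration sketch (editor's addition, not from the kernel tree): a
 * typical module registers an array of algorithms on load and unregisters
 * it on unload; "example_algs" is a hypothetical array.
 *
 *	static struct crypto_alg example_algs[2];
 *
 *	static int __init example_mod_init(void)
 *	{
 *		return crypto_register_algs(example_algs,
 *					    ARRAY_SIZE(example_algs));
 *	}
 *
 *	static void __exit example_mod_exit(void)
 *	{
 *		crypto_unregister_algs(example_algs, ARRAY_SIZE(example_algs));
 *	}
 */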

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
				 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
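
/*
 * Queue usage sketch (editor's addition): a driver normally feeds its
 * hardware from a crypto_queue under its own lock. crypto_enqueue_request()
 * returns -EINPROGRESS on success; when the queue is full it keeps the
 * request as backlog and returns -EBUSY if the caller set
 * CRYPTO_TFM_REQ_MAY_BACKLOG, and rejects it otherwise. A hypothetical
 * dequeue path:
 *
 *	backlog = crypto_get_backlog(&queue);
 *	async_req = crypto_dequeue_request(&queue);
 *	if (backlog)
 *		crypto_request_complete(backlog, -EINPROGRESS);
 */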

void crypto_inc(u8 *a, unsigned int size);

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
					 unsigned int align)
{
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}

static inline unsigned int crypto_dma_align(void)
{
	return CRYPTO_DMA_ALIGN;
}

static inline unsigned int crypto_dma_padding(void)
{
	return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}
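
/*
 * Sizing sketch (editor's addition): a driver that lets hardware DMA into
 * its transform context reserves the extra padding up front and then pulls
 * out a DMA-aligned pointer at runtime; "struct example_ctx" is
 * illustrative only.
 *
 *	alg->base.cra_ctxsize = sizeof(struct example_ctx) +
 *				crypto_dma_padding();
 *	...
 *	struct example_ctx *ctx = crypto_tfm_ctx_dma(tfm);
 */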

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
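
/*
 * Template sketch (editor's addition): a hypothetical ->create()
 * implementation derives the inherited-flags mask from the user's request
 * and passes it, together with type 0, when grabbing the inner algorithm:
 *
 *	struct crypto_attr_type *algt = crypto_get_attr_type(tb);
 *	u32 mask;
 *
 *	if (IS_ERR(algt))
 *		return PTR_ERR(algt);
 *	mask = crypto_algt_inherited_mask(algt);
 *	err = crypto_grab_spawn(spawn, inst, inner_name, 0, mask);
 */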

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

static inline void crypto_request_complete(struct crypto_async_request *req,
					   int err)
{
	req->complete(req->data, err);
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

#endif	/* _CRYPTO_ALGAPI_H */
v4.17
 
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;
	unsigned int ivsize;

	int flags;
	unsigned int walk_blocksize;
	unsigned int cipher_blocksize;
	unsigned int alignmask;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk	in;
	unsigned int		nbytes;
	struct scatter_walk	out;
	unsigned int		total;
	struct list_head	buffers;
	u8			*iv_buffer;
	u8			*iv;
	int			flags;
	unsigned int		blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
			     unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
					      struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
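
/*
 * Usage sketch (editor's addition): CBC-style chaining is the typical
 * caller. For a hypothetical 16-byte block cipher:
 *
 *	crypto_xor(block, iv, 16);		- block ^= iv, done in place
 *	crypto_xor_cpy(dst, src1, src2, 16);	- dst = src1 ^ src2
 */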

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}
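
/*
 * Walk sketch (editor's addition): a blkcipher implementation typically
 * drives the walk like this, processing full blocks of bsize bytes per
 * iteration and handing the remainder back; "example_crypt" is a
 * hypothetical helper.
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while ((nbytes = walk.nbytes)) {
 *		example_crypt(walk.dst.virt.addr, walk.src.virt.addr,
 *			      nbytes - (nbytes % bsize));
 *		err = blkcipher_walk_done(desc, &walk, nbytes % bsize);
 *	}
 */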

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
					  struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
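
/*
 * Usage sketch (editor's addition): authentication-tag verification must
 * not leak how many leading bytes matched, so callers compare with
 * crypto_memneq() instead of memcmp():
 *
 *	if (crypto_memneq(computed_tag, received_tag, authsize))
 *		return -EBADMSG;
 */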

static inline void crypto_yield(u32 flags)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
#endif
}

#endif	/* _CRYPTO_ALGAPI_H */