v6.2 (include/crypto/algapi.h)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/types.h>

#include <asm/unaligned.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		127
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15
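
/*
 * Editor's note, not part of the original header: a sketch of the
 * pattern these maxima support.  A caller reserves a static buffer big
 * enough for any cipher at any supported alignment, then aligns within
 * it; "example_cipher_scratch" is a hypothetical name.
 */
static inline u8 *example_cipher_scratch(u8 *buf, unsigned int alignmask)
{
	/* buf must point at MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK
	 * bytes, e.g. u8 buf[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	 * the aligned pointer still has MAX_CIPHER_BLOCKSIZE bytes left. */
	return PTR_ALIGN(buf, alignmask + 1);
}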

#ifdef ARCH_DMA_MINALIGN
#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
#else
#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
#endif

#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))

struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
struct seq_file;
struct sk_buff;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);
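
/*
 * Editor's note, not upstream code: a minimal sketch of template
 * registration, assuming linux/module.h is available.  The names
 * "example_tmpl", "example_create", "example_init" and "example_exit"
 * are hypothetical.
 */
static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	/* A real ->create() would parse tb, grab the inner algorithm
	 * and call crypto_register_instance(); stubbed out here. */
	return -ENOSYS;
}

static struct crypto_template example_tmpl = {
	.name	= "example",
	.create	= example_create,
	.module	= THIS_MODULE,
};

static int __init example_init(void)
{
	return crypto_register_template(&example_tmpl);
}

static void __exit example_exit(void)
{
	crypto_unregister_template(&example_tmpl);
}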
129
130int crypto_register_instance(struct crypto_template *tmpl,
131			     struct crypto_instance *inst);
132void crypto_unregister_instance(struct crypto_instance *inst);
 
 
 
 
 
 
133
134int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
135		      const char *name, u32 type, u32 mask);
136void crypto_drop_spawn(struct crypto_spawn *spawn);
137struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
138				    u32 mask);
139void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
140
 
 
 
 
 
 
141struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
142int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
143const char *crypto_attr_alg_name(struct rtattr *rta);
144int crypto_inst_setname(struct crypto_instance *inst, const char *name,
145			struct crypto_alg *alg);
 
 
 
 
 
 
 
 
 
 
 
 
 
146
147void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
148int crypto_enqueue_request(struct crypto_queue *queue,
149			   struct crypto_async_request *request);
150void crypto_enqueue_request_head(struct crypto_queue *queue,
151				 struct crypto_async_request *request);
152struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
153static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154{
155	return queue->qlen;
 
156}

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;
		unsigned long l;

		while (size > 0) {
			l = get_unaligned(d) ^ get_unaligned(s++);
			put_unaligned(l, d++);
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;
		unsigned long l;

		while (size > 0) {
			l = get_unaligned(s1++) ^ get_unaligned(s2++);
			put_unaligned(l, d++);
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
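
/*
 * Editor's note, not part of the original header: a sketch of how the
 * helpers above combine in a CTR-style step.  The 16-byte block size
 * and all names are hypothetical.
 */
static inline void example_ctr_step(u8 *dst, const u8 *src,
				    const u8 *keystream, u8 *ctrblk)
{
	crypto_xor_cpy(dst, src, keystream, 16);  /* dst = src ^ keystream */
	crypto_inc(ctrblk, 16);			  /* big-endian counter++ */
}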

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
					 unsigned int align)
{
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1);
}
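
/*
 * Editor's note, not part of the original header: algorithms needing
 * context alignment beyond the kernel's minimum typically reserve the
 * slack at registration and recover the aligned start at runtime, e.g.
 * (hypothetical "struct example_ctx"):
 *
 *	.cra_ctxsize = sizeof(struct example_ctx) + alignmask,
 *	...
 *	struct example_ctx *ctx = crypto_tfm_ctx_aligned(tfm);
 */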

static inline unsigned int crypto_dma_align(void)
{
	return CRYPTO_DMA_ALIGN;
}

static inline unsigned int crypto_dma_padding(void)
{
	return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}
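
/*
 * Editor's note, not upstream code: a sketch of the DMA-context
 * pattern, with a hypothetical "struct example_dma_ctx".  The driver
 * pads .cra_ctxsize so the aligned pointer still has room:
 *
 *	.cra_ctxsize = sizeof(struct example_dma_ctx) + CRYPTO_DMA_PADDING,
 */
static inline void *example_dma_ctx(struct crypto_tfm *tfm)
{
	/* Safe to hand to a device on non-coherent architectures. */
	return crypto_tfm_ctx_dma(tfm);
}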

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}
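
/*
 * Editor's note, not upstream code: a sketch of a driver's dispatch
 * step.  A backlogged request must be told it is now in progress once
 * the queue has room again; locking is elided and names are
 * hypothetical.
 */
static inline void example_dispatch_one(struct crypto_queue *queue)
{
	struct crypto_async_request *req, *backlog;

	backlog = crypto_get_backlog(queue);
	req = crypto_dequeue_request(queue);
	if (!req)
		return;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	/* ... process req, then call req->complete(req, err) ... */
}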

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
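
/*
 * Editor's note, not upstream code: a sketch of the intended use
 * inside a template ->create().  crypto_check_attr_type() fills the
 * mask via crypto_algt_inherited_mask(); "example_grab_inner" is a
 * hypothetical helper.
 */
static int example_grab_inner(struct crypto_instance *inst,
			      struct rtattr **tb, struct crypto_spawn *spawn)
{
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	/* type == 0, mask carries the user's inherited-flag requests */
	return crypto_grab_spawn(spawn, inst, crypto_attr_alg_name(tb[1]),
				 0, mask);
}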

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
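
/*
 * Editor's note, not part of the original header: the typical caller
 * is authentication-tag verification; the 16-byte tag is hypothetical.
 */
static inline int example_verify_tag(const u8 *calc, const u8 *given)
{
	return crypto_memneq(calc, given, 16) ? -EBADMSG : 0;
}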

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

#endif	/* _CRYPTO_ALGAPI_H */
v3.5.6 (include/crypto/algapi.h)
 
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;

	int flags;
	unsigned int blocksize;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk	in;
	unsigned int		nbytes;
	struct scatter_walk	out;
	unsigned int		total;
	struct list_head	buffers;
	u8			*iv_buffer;
	u8			*iv;
	int			flags;
	unsigned int		blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_alg *alg);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
			     unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
					      struct crypto_alg *alg);
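
/*
 * Editor's note, not upstream code: a sketch of an old-style template
 * ->alloc() built on the helpers above; the "example" naming is
 * hypothetical and error handling is abbreviated.
 */
static struct crypto_instance *example_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
			      CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("example", alg);
	crypto_mod_put(alg);	/* the instance holds its own reference */
	return inst;
}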

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);

/* These functions require the input/output to be aligned as u32. */
void crypto_inc(u8 *a, unsigned int size);
void crypto_xor(u8 *dst, const u8 *src, unsigned int size);

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
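
/*
 * Editor's note, not upstream code: the classic walk loop in a
 * blkcipher encrypt path.  Per-chunk processing is elided; passing 0
 * to blkcipher_walk_done() reports that all mapped bytes were consumed.
 */
static int example_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		/* encrypt walk.nbytes bytes from walk.src.virt.addr
		 * into walk.dst.virt.addr here */
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}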

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
	return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
}

static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline struct crypto_instance *crypto_aead_alg_instance(
	struct crypto_aead *aead)
{
	return crypto_tfm_alg_instance(&aead->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_HASH;
	u32 mask = CRYPTO_ALG_TYPE_HASH_MASK;

	return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_hash_ctx(struct crypto_hash *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
					  struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline void *aead_request_ctx(struct aead_request *req)
{
	return req->__ctx;
}

static inline void aead_request_complete(struct aead_request *req, int err)
{
	req->base.complete(&req->base, err);
}

static inline u32 aead_request_flags(struct aead_request *req)
{
	return req->base.flags;
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
}
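
/*
 * Editor's note, not part of the original header: a template honoring
 * a request for a synchronous instance folds this into its lookup
 * mask, e.g.:
 *
 *	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
 *				  CRYPTO_ALG_TYPE_MASK |
 *				  crypto_requires_sync(algt->type,
 *						       algt->mask));
 */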

#endif	/* _CRYPTO_ALGAPI_H */