v4.6
  1/*
  2 * algif_aead: User-space interface for AEAD algorithms
  3 *
  4 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
  5 *
  6 * This file provides the user-space API for AEAD ciphers.
  7 *
  8 * This file is derived from algif_skcipher.c.
  9 *
 10 * This program is free software; you can redistribute it and/or modify it
 11 * under the terms of the GNU General Public License as published by the Free
 12 * Software Foundation; either version 2 of the License, or (at your option)
 13 * any later version.
 14 */
 15
 16#include <crypto/aead.h>
 17#include <crypto/scatterwalk.h>
 18#include <crypto/if_alg.h>
 19#include <linux/init.h>
 20#include <linux/list.h>
 21#include <linux/kernel.h>
 22#include <linux/mm.h>
 23#include <linux/module.h>
 24#include <linux/net.h>
 25#include <net/sock.h>
 26
 27struct aead_sg_list {
 28	unsigned int cur;
 29	struct scatterlist sg[ALG_MAX_PAGES];
 30};
 31
 32struct aead_ctx {
 33	struct aead_sg_list tsgl;
 34	/*
 35	 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
 36	 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
 37	 * pages
 38	 */
 39#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
 40	struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
 41
 42	void *iv;
 43
 44	struct af_alg_completion completion;
 45
 46	unsigned long used;
 47
 48	unsigned int len;
 49	bool more;
 50	bool merge;
 51	bool enc;
 52
 53	size_t aead_assoclen;
 54	struct aead_request aead_req;
 55};
 56
 57static inline int aead_sndbuf(struct sock *sk)
 58{
 59	struct alg_sock *ask = alg_sk(sk);
 60	struct aead_ctx *ctx = ask->private;
 61
 62	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
 63			  ctx->used, 0);
 64}
 65
 66static inline bool aead_writable(struct sock *sk)
 67{
 68	return PAGE_SIZE <= aead_sndbuf(sk);
 69}
 70
 71static inline bool aead_sufficient_data(struct aead_ctx *ctx)
 72{
 73	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
 74
 75	return ctx->used >= ctx->aead_assoclen + as;
 76}
 77
 78static void aead_put_sgl(struct sock *sk)
 79{
 80	struct alg_sock *ask = alg_sk(sk);
 81	struct aead_ctx *ctx = ask->private;
 82	struct aead_sg_list *sgl = &ctx->tsgl;
 83	struct scatterlist *sg = sgl->sg;
 84	unsigned int i;
 85
 86	for (i = 0; i < sgl->cur; i++) {
 87		if (!sg_page(sg + i))
 88			continue;
 89
 90		put_page(sg_page(sg + i));
 91		sg_assign_page(sg + i, NULL);
 92	}
 93	sg_init_table(sg, ALG_MAX_PAGES);
 94	sgl->cur = 0;
 95	ctx->used = 0;
 96	ctx->more = 0;
 97	ctx->merge = 0;
 98}
 99
100static void aead_wmem_wakeup(struct sock *sk)
101{
102	struct socket_wq *wq;
103
104	if (!aead_writable(sk))
105		return;
106
107	rcu_read_lock();
108	wq = rcu_dereference(sk->sk_wq);
109	if (skwq_has_sleeper(wq))
110		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
111							   POLLRDNORM |
112							   POLLRDBAND);
113	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
114	rcu_read_unlock();
115}
116
117static int aead_wait_for_data(struct sock *sk, unsigned flags)
118{
119	struct alg_sock *ask = alg_sk(sk);
120	struct aead_ctx *ctx = ask->private;
121	long timeout;
122	DEFINE_WAIT(wait);
123	int err = -ERESTARTSYS;
124
125	if (flags & MSG_DONTWAIT)
126		return -EAGAIN;
127
128	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
129
130	for (;;) {
131		if (signal_pending(current))
132			break;
133		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
134		timeout = MAX_SCHEDULE_TIMEOUT;
135		if (sk_wait_event(sk, &timeout, !ctx->more)) {
136			err = 0;
137			break;
138		}
139	}
140	finish_wait(sk_sleep(sk), &wait);
141
142	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
143
144	return err;
145}
146
147static void aead_data_wakeup(struct sock *sk)
148{
149	struct alg_sock *ask = alg_sk(sk);
150	struct aead_ctx *ctx = ask->private;
151	struct socket_wq *wq;
152
153	if (ctx->more)
154		return;
155	if (!ctx->used)
156		return;
157
158	rcu_read_lock();
159	wq = rcu_dereference(sk->sk_wq);
160	if (skwq_has_sleeper(wq))
161		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
162							   POLLRDNORM |
163							   POLLRDBAND);
164	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
165	rcu_read_unlock();
166}
167
168static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
169{
170	struct sock *sk = sock->sk;
171	struct alg_sock *ask = alg_sk(sk);
172	struct aead_ctx *ctx = ask->private;
173	unsigned ivsize =
174		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
175	struct aead_sg_list *sgl = &ctx->tsgl;
176	struct af_alg_control con = {};
177	long copied = 0;
178	bool enc = 0;
179	bool init = 0;
180	int err = -EINVAL;
181
182	if (msg->msg_controllen) {
183		err = af_alg_cmsg_send(msg, &con);
184		if (err)
185			return err;
186
187		init = 1;
188		switch (con.op) {
189		case ALG_OP_ENCRYPT:
190			enc = 1;
191			break;
192		case ALG_OP_DECRYPT:
193			enc = 0;
194			break;
195		default:
196			return -EINVAL;
197		}
198
199		if (con.iv && con.iv->ivlen != ivsize)
200			return -EINVAL;
201	}
202
203	lock_sock(sk);
204	if (!ctx->more && ctx->used)
205		goto unlock;
206
207	if (init) {
208		ctx->enc = enc;
209		if (con.iv)
210			memcpy(ctx->iv, con.iv->iv, ivsize);
211
212		ctx->aead_assoclen = con.aead_assoclen;
213	}
214
215	while (size) {
216		size_t len = size;
217		struct scatterlist *sg = NULL;
218
219		/* use the existing memory in an allocated page */
220		if (ctx->merge) {
221			sg = sgl->sg + sgl->cur - 1;
222			len = min_t(unsigned long, len,
223				    PAGE_SIZE - sg->offset - sg->length);
224			err = memcpy_from_msg(page_address(sg_page(sg)) +
225					      sg->offset + sg->length,
226					      msg, len);
227			if (err)
228				goto unlock;
229
230			sg->length += len;
231			ctx->merge = (sg->offset + sg->length) &
232				     (PAGE_SIZE - 1);
233
234			ctx->used += len;
235			copied += len;
236			size -= len;
237			continue;
238		}
239
240		if (!aead_writable(sk)) {
241			/* user space sent too much data */
242			aead_put_sgl(sk);
243			err = -EMSGSIZE;
244			goto unlock;
245		}
246
247		/* allocate a new page */
248		len = min_t(unsigned long, size, aead_sndbuf(sk));
249		while (len) {
250			size_t plen = 0;
251
252			if (sgl->cur >= ALG_MAX_PAGES) {
253				aead_put_sgl(sk);
254				err = -E2BIG;
255				goto unlock;
256			}
257
258			sg = sgl->sg + sgl->cur;
259			plen = min_t(size_t, len, PAGE_SIZE);
260
261			sg_assign_page(sg, alloc_page(GFP_KERNEL));
262			err = -ENOMEM;
263			if (!sg_page(sg))
264				goto unlock;
265
266			err = memcpy_from_msg(page_address(sg_page(sg)),
267					      msg, plen);
268			if (err) {
269				__free_page(sg_page(sg));
270				sg_assign_page(sg, NULL);
271				goto unlock;
272			}
273
274			sg->offset = 0;
275			sg->length = plen;
276			len -= plen;
277			ctx->used += plen;
278			copied += plen;
279			sgl->cur++;
280			size -= plen;
281			ctx->merge = plen & (PAGE_SIZE - 1);
282		}
283	}
284
285	err = 0;
286
287	ctx->more = msg->msg_flags & MSG_MORE;
288	if (!ctx->more && !aead_sufficient_data(ctx)) {
289		aead_put_sgl(sk);
290		err = -EMSGSIZE;
291	}
292
293unlock:
294	aead_data_wakeup(sk);
295	release_sock(sk);
296
297	return err ?: copied;
298}
299
300static ssize_t aead_sendpage(struct socket *sock, struct page *page,
301			     int offset, size_t size, int flags)
302{
303	struct sock *sk = sock->sk;
304	struct alg_sock *ask = alg_sk(sk);
305	struct aead_ctx *ctx = ask->private;
306	struct aead_sg_list *sgl = &ctx->tsgl;
307	int err = -EINVAL;
308
309	if (flags & MSG_SENDPAGE_NOTLAST)
310		flags |= MSG_MORE;
311
312	if (sgl->cur >= ALG_MAX_PAGES)
313		return -E2BIG;
314
315	lock_sock(sk);
316	if (!ctx->more && ctx->used)
317		goto unlock;
318
319	if (!size)
320		goto done;
321
322	if (!aead_writable(sk)) {
323		/* user space sent too much data */
324		aead_put_sgl(sk);
325		err = -EMSGSIZE;
326		goto unlock;
327	}
328
329	ctx->merge = 0;
330
331	get_page(page);
332	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
333	sgl->cur++;
334	ctx->used += size;
335
336	err = 0;
337
338done:
339	ctx->more = flags & MSG_MORE;
340	if (!ctx->more && !aead_sufficient_data(ctx)) {
341		aead_put_sgl(sk);
342		err = -EMSGSIZE;
343	}
344
345unlock:
346	aead_data_wakeup(sk);
347	release_sock(sk);
348
349	return err ?: size;
350}
351
352static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
353{
354	struct sock *sk = sock->sk;
355	struct alg_sock *ask = alg_sk(sk);
356	struct aead_ctx *ctx = ask->private;
357	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
358	struct aead_sg_list *sgl = &ctx->tsgl;
359	unsigned int i = 0;
360	int err = -EINVAL;
361	unsigned long used = 0;
362	size_t outlen = 0;
363	size_t usedpages = 0;
364	unsigned int cnt = 0;
365
366	/* Limit number of IOV blocks to be accessed below */
367	if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
368		return -ENOMSG;
369
370	lock_sock(sk);
371
372	/*
373	 * AEAD memory structure: For encryption, the tag is appended to the
374	 * ciphertext which implies that the memory allocated for the ciphertext
375	 * must be increased by the tag length. For decryption, the tag
376	 * is expected to be concatenated to the ciphertext. The plaintext
377	 * therefore has a memory size of the ciphertext minus the tag length.
378	 *
379	 * The memory structure for cipher operation has the following
380	 * structure:
381	 *	AEAD encryption input:  assoc data || plaintext
382	 *	AEAD encryption output: ciphertext || auth tag
383	 *	AEAD decryption input:  assoc data || ciphertext || auth tag
384	 *	AEAD decryption output: plaintext
385	 */
386
387	if (ctx->more) {
388		err = aead_wait_for_data(sk, flags);
389		if (err)
390			goto unlock;
391	}
392
393	used = ctx->used;
394
395	/*
396	 * Make sure sufficient data is present -- note, the same check is
397	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
398	 * shall provide information to the data sender that something is
399	 * wrong, but they are irrelevant for maintaining kernel integrity.
400	 * We need this check here too in case user space decides to not honor
401	 * the error message in sendmsg/sendpage and still call recvmsg. This
402	 * check here protects the kernel integrity.
403	 */
404	if (!aead_sufficient_data(ctx))
405		goto unlock;
406
407	outlen = used;
408
409	/*
410	 * The cipher operation input data is reduced by the associated data
411	 * length as this data is processed separately later on.
412	 */
413	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
414
415	/* convert iovecs of output buffers into scatterlists */
416	while (iov_iter_count(&msg->msg_iter)) {
417		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
418				      (outlen - usedpages));
419
420		/* make one iovec available as scatterlist */
421		err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
422				     seglen);
423		if (err < 0)
424			goto unlock;
425		usedpages += err;
426		/* chain the new scatterlist with previous one */
427		if (cnt)
428			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
429
430		/* we do not need more iovecs as we have sufficient memory */
431		if (outlen <= usedpages)
432			break;
433		iov_iter_advance(&msg->msg_iter, err);
434		cnt++;
435	}
436
437	err = -EINVAL;
438	/* ensure output buffer is sufficiently large */
439	if (usedpages < outlen)
440		goto unlock;
441
442	sg_mark_end(sgl->sg + sgl->cur - 1);
443
444	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
445			       used, ctx->iv);
446	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
447
448	err = af_alg_wait_for_completion(ctx->enc ?
449					 crypto_aead_encrypt(&ctx->aead_req) :
450					 crypto_aead_decrypt(&ctx->aead_req),
451					 &ctx->completion);
452
453	if (err) {
454		/* EBADMSG implies a valid cipher operation took place */
455		if (err == -EBADMSG)
456			aead_put_sgl(sk);
457		goto unlock;
458	}
459
460	aead_put_sgl(sk);
461
462	err = 0;
463
464unlock:
465	for (i = 0; i < cnt; i++)
466		af_alg_free_sg(&ctx->rsgl[i]);
467
468	aead_wmem_wakeup(sk);
469	release_sock(sk);
470
471	return err ? err : outlen;
472}
473
474static unsigned int aead_poll(struct file *file, struct socket *sock,
475			      poll_table *wait)
476{
477	struct sock *sk = sock->sk;
478	struct alg_sock *ask = alg_sk(sk);
479	struct aead_ctx *ctx = ask->private;
480	unsigned int mask;
481
482	sock_poll_wait(file, sk_sleep(sk), wait);
483	mask = 0;
484
485	if (!ctx->more)
486		mask |= POLLIN | POLLRDNORM;
487
488	if (aead_writable(sk))
489		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
490
491	return mask;
492}
493
494static struct proto_ops algif_aead_ops = {
495	.family		=	PF_ALG,
496
497	.connect	=	sock_no_connect,
498	.socketpair	=	sock_no_socketpair,
499	.getname	=	sock_no_getname,
500	.ioctl		=	sock_no_ioctl,
501	.listen		=	sock_no_listen,
502	.shutdown	=	sock_no_shutdown,
503	.getsockopt	=	sock_no_getsockopt,
504	.mmap		=	sock_no_mmap,
505	.bind		=	sock_no_bind,
506	.accept		=	sock_no_accept,
507	.setsockopt	=	sock_no_setsockopt,
508
509	.release	=	af_alg_release,
510	.sendmsg	=	aead_sendmsg,
511	.sendpage	=	aead_sendpage,
512	.recvmsg	=	aead_recvmsg,
513	.poll		=	aead_poll,
514};
515
516static void *aead_bind(const char *name, u32 type, u32 mask)
517{
518	return crypto_alloc_aead(name, type, mask);
519}
520
521static void aead_release(void *private)
522{
523	crypto_free_aead(private);
524}
525
526static int aead_setauthsize(void *private, unsigned int authsize)
527{
528	return crypto_aead_setauthsize(private, authsize);
529}
530
531static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
532{
533	return crypto_aead_setkey(private, key, keylen);
534}
535
536static void aead_sock_destruct(struct sock *sk)
537{
538	struct alg_sock *ask = alg_sk(sk);
539	struct aead_ctx *ctx = ask->private;
540	unsigned int ivlen = crypto_aead_ivsize(
541				crypto_aead_reqtfm(&ctx->aead_req));
542
543	aead_put_sgl(sk);
544	sock_kzfree_s(sk, ctx->iv, ivlen);
545	sock_kfree_s(sk, ctx, ctx->len);
546	af_alg_release_parent(sk);
547}
548
549static int aead_accept_parent(void *private, struct sock *sk)
550{
551	struct aead_ctx *ctx;
552	struct alg_sock *ask = alg_sk(sk);
553	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
554	unsigned int ivlen = crypto_aead_ivsize(private);
555
556	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
557	if (!ctx)
558		return -ENOMEM;
559	memset(ctx, 0, len);
560
561	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
562	if (!ctx->iv) {
563		sock_kfree_s(sk, ctx, len);
564		return -ENOMEM;
565	}
566	memset(ctx->iv, 0, ivlen);
567
568	ctx->len = len;
569	ctx->used = 0;
570	ctx->more = 0;
571	ctx->merge = 0;
572	ctx->enc = 0;
573	ctx->tsgl.cur = 0;
574	ctx->aead_assoclen = 0;
575	af_alg_init_completion(&ctx->completion);
576	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
577
578	ask->private = ctx;
579
580	aead_request_set_tfm(&ctx->aead_req, private);
581	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
582				  af_alg_complete, &ctx->completion);
583
584	sk->sk_destruct = aead_sock_destruct;
585
586	return 0;
587}
588
589static const struct af_alg_type algif_type_aead = {
590	.bind		=	aead_bind,
591	.release	=	aead_release,
592	.setkey		=	aead_setkey,
593	.setauthsize	=	aead_setauthsize,
594	.accept		=	aead_accept_parent,
595	.ops		=	&algif_aead_ops,
596	.name		=	"aead",
597	.owner		=	THIS_MODULE
598};
599
600static int __init algif_aead_init(void)
601{
602	return af_alg_register_type(&algif_type_aead);
603}
604
605static void __exit algif_aead_exit(void)
606{
607	int err = af_alg_unregister_type(&algif_type_aead);
608	BUG_ON(err);
609}
610
611module_init(algif_aead_init);
612module_exit(algif_aead_exit);
613MODULE_LICENSE("GPL");
614MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
615MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
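The listing above is only the kernel half of the interface; user space reaches it through an AF_ALG socket of type "aead". Below is a minimal, hedged sketch of one synchronous encryption pass against this v4.6 code, assuming a kernel with CONFIG_CRYPTO_USER_API_AEAD and a "gcm(aes)" implementation; the key, IV, AAD and plaintext sizes are illustrative placeholders and error handling is omitted. Per aead_sufficient_data() and the length accounting in aead_recvmsg() above, this version expects the encryption input to be tag-length larger than AAD plus plaintext, and the same number of bytes is read back (AAD region, ciphertext, tag).

/*
 * User-space sketch, not part of the kernel file above.  All sizes and
 * values are placeholders; error checking is omitted for brevity.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279	/* not all libc headers define it */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",	/* assumed to be available */
	};
	unsigned char key[16] = { 0 }, iv[12] = { 0 };	/* placeholders */
	unsigned char aad[8] = { 0 }, pt[16] = { 0 };	/* placeholders */
	/* v4.6 rule: input = aad || plaintext || tag-sized scratch; the same
	 * number of bytes comes back as aad region || ciphertext || tag. */
	unsigned char in[8 + 16 + 16] = { 0 }, out[8 + 16 + 16];

	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));	/* aead_setkey() */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);	/* aead_setauthsize() */
	int opfd = accept(tfmfd, NULL, 0);				/* aead_accept_parent() */

	memcpy(in, aad, sizeof(aad));
	memcpy(in + sizeof(aad), pt, sizeof(pt));

	/* Control messages consumed by af_alg_cmsg_send() from aead_sendmsg() */
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv)) +
		  CMSG_SPACE(sizeof(__u32))] = { 0 };
	struct iovec iov = { .iov_base = in, .iov_len = sizeof(in) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};

	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_OP;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_IV;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
	((struct af_alg_iv *)CMSG_DATA(cmsg))->ivlen = sizeof(iv);
	memcpy(((struct af_alg_iv *)CMSG_DATA(cmsg))->iv, iv, sizeof(iv));

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = sizeof(aad);

	sendmsg(opfd, &msg, 0);		/* aead_sendmsg() */
	read(opfd, out, sizeof(out));	/* aead_recvmsg() */

	close(opfd);
	close(tfmfd);
	return 0;
}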
v4.10.11
  1/*
  2 * algif_aead: User-space interface for AEAD algorithms
  3 *
  4 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
  5 *
  6 * This file provides the user-space API for AEAD ciphers.
  7 *
  8 * This file is derived from algif_skcipher.c.
  9 *
 10 * This program is free software; you can redistribute it and/or modify it
 11 * under the terms of the GNU General Public License as published by the Free
 12 * Software Foundation; either version 2 of the License, or (at your option)
 13 * any later version.
 14 */
 15
 16#include <crypto/internal/aead.h>
 17#include <crypto/scatterwalk.h>
 18#include <crypto/if_alg.h>
 19#include <linux/init.h>
 20#include <linux/list.h>
 21#include <linux/kernel.h>
 22#include <linux/mm.h>
 23#include <linux/module.h>
 24#include <linux/net.h>
 25#include <net/sock.h>
 26
 27struct aead_sg_list {
 28	unsigned int cur;
 29	struct scatterlist sg[ALG_MAX_PAGES];
 30};
 31
 32struct aead_async_rsgl {
 33	struct af_alg_sgl sgl;
 34	struct list_head list;
 35};
 36
 37struct aead_async_req {
 38	struct scatterlist *tsgl;
 39	struct aead_async_rsgl first_rsgl;
 40	struct list_head list;
 41	struct kiocb *iocb;
 42	unsigned int tsgls;
 43	char iv[];
 44};
 45
 46struct aead_ctx {
 47	struct aead_sg_list tsgl;
 48	struct aead_async_rsgl first_rsgl;
 49	struct list_head list;
 50
 51	void *iv;
 52
 53	struct af_alg_completion completion;
 54
 55	unsigned long used;
 56
 57	unsigned int len;
 58	bool more;
 59	bool merge;
 60	bool enc;
 61
 62	size_t aead_assoclen;
 63	struct aead_request aead_req;
 64};
 65
 66static inline int aead_sndbuf(struct sock *sk)
 67{
 68	struct alg_sock *ask = alg_sk(sk);
 69	struct aead_ctx *ctx = ask->private;
 70
 71	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
 72			  ctx->used, 0);
 73}
 74
 75static inline bool aead_writable(struct sock *sk)
 76{
 77	return PAGE_SIZE <= aead_sndbuf(sk);
 78}
 79
 80static inline bool aead_sufficient_data(struct aead_ctx *ctx)
 81{
 82	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
 83
 84	/*
 85	 * The minimum amount of memory needed for an AEAD cipher is
 86	 * the AAD and in case of decryption the tag.
 87	 */
 88	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
 89}
 90
 91static void aead_reset_ctx(struct aead_ctx *ctx)
 92{
 93	struct aead_sg_list *sgl = &ctx->tsgl;
 94
 95	sg_init_table(sgl->sg, ALG_MAX_PAGES);
 96	sgl->cur = 0;
 97	ctx->used = 0;
 98	ctx->more = 0;
 99	ctx->merge = 0;
100}
101
102static void aead_put_sgl(struct sock *sk)
103{
104	struct alg_sock *ask = alg_sk(sk);
105	struct aead_ctx *ctx = ask->private;
106	struct aead_sg_list *sgl = &ctx->tsgl;
107	struct scatterlist *sg = sgl->sg;
108	unsigned int i;
109
110	for (i = 0; i < sgl->cur; i++) {
111		if (!sg_page(sg + i))
112			continue;
113
114		put_page(sg_page(sg + i));
115		sg_assign_page(sg + i, NULL);
116	}
117	aead_reset_ctx(ctx);
118}
119
120static void aead_wmem_wakeup(struct sock *sk)
121{
122	struct socket_wq *wq;
123
124	if (!aead_writable(sk))
125		return;
126
127	rcu_read_lock();
128	wq = rcu_dereference(sk->sk_wq);
129	if (skwq_has_sleeper(wq))
130		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
131							   POLLRDNORM |
132							   POLLRDBAND);
133	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
134	rcu_read_unlock();
135}
136
137static int aead_wait_for_data(struct sock *sk, unsigned flags)
138{
139	DEFINE_WAIT_FUNC(wait, woken_wake_function);
140	struct alg_sock *ask = alg_sk(sk);
141	struct aead_ctx *ctx = ask->private;
142	long timeout;
143	int err = -ERESTARTSYS;
144
145	if (flags & MSG_DONTWAIT)
146		return -EAGAIN;
147
148	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
149	add_wait_queue(sk_sleep(sk), &wait);
150	for (;;) {
151		if (signal_pending(current))
152			break;
153		timeout = MAX_SCHEDULE_TIMEOUT;
154		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
155			err = 0;
156			break;
157		}
158	}
159	remove_wait_queue(sk_sleep(sk), &wait);
160
161	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
162
163	return err;
164}
165
166static void aead_data_wakeup(struct sock *sk)
167{
168	struct alg_sock *ask = alg_sk(sk);
169	struct aead_ctx *ctx = ask->private;
170	struct socket_wq *wq;
171
172	if (ctx->more)
173		return;
174	if (!ctx->used)
175		return;
176
177	rcu_read_lock();
178	wq = rcu_dereference(sk->sk_wq);
179	if (skwq_has_sleeper(wq))
180		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
181							   POLLRDNORM |
182							   POLLRDBAND);
183	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
184	rcu_read_unlock();
185}
186
187static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
188{
189	struct sock *sk = sock->sk;
190	struct alg_sock *ask = alg_sk(sk);
191	struct aead_ctx *ctx = ask->private;
192	unsigned ivsize =
193		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
194	struct aead_sg_list *sgl = &ctx->tsgl;
195	struct af_alg_control con = {};
196	long copied = 0;
197	bool enc = 0;
198	bool init = 0;
199	int err = -EINVAL;
200
201	if (msg->msg_controllen) {
202		err = af_alg_cmsg_send(msg, &con);
203		if (err)
204			return err;
205
206		init = 1;
207		switch (con.op) {
208		case ALG_OP_ENCRYPT:
209			enc = 1;
210			break;
211		case ALG_OP_DECRYPT:
212			enc = 0;
213			break;
214		default:
215			return -EINVAL;
216		}
217
218		if (con.iv && con.iv->ivlen != ivsize)
219			return -EINVAL;
220	}
221
222	lock_sock(sk);
223	if (!ctx->more && ctx->used)
224		goto unlock;
225
226	if (init) {
227		ctx->enc = enc;
228		if (con.iv)
229			memcpy(ctx->iv, con.iv->iv, ivsize);
230
231		ctx->aead_assoclen = con.aead_assoclen;
232	}
233
234	while (size) {
235		size_t len = size;
236		struct scatterlist *sg = NULL;
237
238		/* use the existing memory in an allocated page */
239		if (ctx->merge) {
240			sg = sgl->sg + sgl->cur - 1;
241			len = min_t(unsigned long, len,
242				    PAGE_SIZE - sg->offset - sg->length);
243			err = memcpy_from_msg(page_address(sg_page(sg)) +
244					      sg->offset + sg->length,
245					      msg, len);
246			if (err)
247				goto unlock;
248
249			sg->length += len;
250			ctx->merge = (sg->offset + sg->length) &
251				     (PAGE_SIZE - 1);
252
253			ctx->used += len;
254			copied += len;
255			size -= len;
256			continue;
257		}
258
259		if (!aead_writable(sk)) {
260			/* user space sent too much data */
261			aead_put_sgl(sk);
262			err = -EMSGSIZE;
263			goto unlock;
264		}
265
266		/* allocate a new page */
267		len = min_t(unsigned long, size, aead_sndbuf(sk));
268		while (len) {
269			size_t plen = 0;
270
271			if (sgl->cur >= ALG_MAX_PAGES) {
272				aead_put_sgl(sk);
273				err = -E2BIG;
274				goto unlock;
275			}
276
277			sg = sgl->sg + sgl->cur;
278			plen = min_t(size_t, len, PAGE_SIZE);
279
280			sg_assign_page(sg, alloc_page(GFP_KERNEL));
281			err = -ENOMEM;
282			if (!sg_page(sg))
283				goto unlock;
284
285			err = memcpy_from_msg(page_address(sg_page(sg)),
286					      msg, plen);
287			if (err) {
288				__free_page(sg_page(sg));
289				sg_assign_page(sg, NULL);
290				goto unlock;
291			}
292
293			sg->offset = 0;
294			sg->length = plen;
295			len -= plen;
296			ctx->used += plen;
297			copied += plen;
298			sgl->cur++;
299			size -= plen;
300			ctx->merge = plen & (PAGE_SIZE - 1);
301		}
302	}
303
304	err = 0;
305
306	ctx->more = msg->msg_flags & MSG_MORE;
307	if (!ctx->more && !aead_sufficient_data(ctx)) {
308		aead_put_sgl(sk);
309		err = -EMSGSIZE;
310	}
311
312unlock:
313	aead_data_wakeup(sk);
314	release_sock(sk);
315
316	return err ?: copied;
317}
318
319static ssize_t aead_sendpage(struct socket *sock, struct page *page,
320			     int offset, size_t size, int flags)
321{
322	struct sock *sk = sock->sk;
323	struct alg_sock *ask = alg_sk(sk);
324	struct aead_ctx *ctx = ask->private;
325	struct aead_sg_list *sgl = &ctx->tsgl;
326	int err = -EINVAL;
327
328	if (flags & MSG_SENDPAGE_NOTLAST)
329		flags |= MSG_MORE;
330
331	if (sgl->cur >= ALG_MAX_PAGES)
332		return -E2BIG;
333
334	lock_sock(sk);
335	if (!ctx->more && ctx->used)
336		goto unlock;
337
338	if (!size)
339		goto done;
340
341	if (!aead_writable(sk)) {
342		/* user space sent too much data */
343		aead_put_sgl(sk);
344		err = -EMSGSIZE;
345		goto unlock;
346	}
347
348	ctx->merge = 0;
349
350	get_page(page);
351	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
352	sgl->cur++;
353	ctx->used += size;
354
355	err = 0;
356
357done:
358	ctx->more = flags & MSG_MORE;
359	if (!ctx->more && !aead_sufficient_data(ctx)) {
360		aead_put_sgl(sk);
361		err = -EMSGSIZE;
362	}
363
364unlock:
365	aead_data_wakeup(sk);
366	release_sock(sk);
367
368	return err ?: size;
369}
370
371#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
372		((char *)req + sizeof(struct aead_request) + \
373		 crypto_aead_reqsize(tfm))
374
375 #define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
376	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
377	sizeof(struct aead_request)
378
379static void aead_async_cb(struct crypto_async_request *_req, int err)
380{
381	struct sock *sk = _req->data;
382	struct alg_sock *ask = alg_sk(sk);
383	struct aead_ctx *ctx = ask->private;
384	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
385	struct aead_request *req = aead_request_cast(_req);
386	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
387	struct scatterlist *sg = areq->tsgl;
388	struct aead_async_rsgl *rsgl;
389	struct kiocb *iocb = areq->iocb;
390	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
391
392	list_for_each_entry(rsgl, &areq->list, list) {
393		af_alg_free_sg(&rsgl->sgl);
394		if (rsgl != &areq->first_rsgl)
395			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
396	}
397
398	for (i = 0; i < areq->tsgls; i++)
399		put_page(sg_page(sg + i));
400
401	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
402	sock_kfree_s(sk, req, reqlen);
403	__sock_put(sk);
404	iocb->ki_complete(iocb, err, err);
405}
406
407static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
408			      int flags)
409{
410	struct sock *sk = sock->sk;
411	struct alg_sock *ask = alg_sk(sk);
412	struct aead_ctx *ctx = ask->private;
413	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
414	struct aead_async_req *areq;
415	struct aead_request *req = NULL;
416	struct aead_sg_list *sgl = &ctx->tsgl;
417	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
418	unsigned int as = crypto_aead_authsize(tfm);
419	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
420	int err = -ENOMEM;
421	unsigned long used;
422	size_t outlen = 0;
423	size_t usedpages = 0;
424
425	lock_sock(sk);
426	if (ctx->more) {
427		err = aead_wait_for_data(sk, flags);
428		if (err)
429			goto unlock;
430	}
431
432	if (!aead_sufficient_data(ctx))
433		goto unlock;
434
435	used = ctx->used;
436	if (ctx->enc)
437		outlen = used + as;
438	else
439		outlen = used - as;
440
441	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
442	if (unlikely(!req))
443		goto unlock;
444
445	areq = GET_ASYM_REQ(req, tfm);
446	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
447	INIT_LIST_HEAD(&areq->list);
448	areq->iocb = msg->msg_iocb;
449	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
450	aead_request_set_tfm(req, tfm);
451	aead_request_set_ad(req, ctx->aead_assoclen);
452	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
453				  aead_async_cb, sk);
454	used -= ctx->aead_assoclen;
455
456	/* take over all tx sgls from ctx */
457	areq->tsgl = sock_kmalloc(sk,
458				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
459				  GFP_KERNEL);
460	if (unlikely(!areq->tsgl))
461		goto free;
462
463	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
464	for (i = 0; i < sgl->cur; i++)
465		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
466			    sgl->sg[i].length, sgl->sg[i].offset);
467
468	areq->tsgls = sgl->cur;
469
470	/* create rx sgls */
471	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
472		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
473				      (outlen - usedpages));
474
475		if (list_empty(&areq->list)) {
476			rsgl = &areq->first_rsgl;
477
478		} else {
479			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
480			if (unlikely(!rsgl)) {
481				err = -ENOMEM;
482				goto free;
483			}
484		}
485		rsgl->sgl.npages = 0;
486		list_add_tail(&rsgl->list, &areq->list);
487
488		/* make one iovec available as scatterlist */
489		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
490		if (err < 0)
491			goto free;
492
493		usedpages += err;
494
495		/* chain the new scatterlist with previous one */
496		if (last_rsgl)
497			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
498
499		last_rsgl = rsgl;
500
501		iov_iter_advance(&msg->msg_iter, err);
502	}
503
504	/* ensure output buffer is sufficiently large */
505	if (usedpages < outlen) {
506		err = -EINVAL;
507		goto unlock;
508	}
509
510	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
511			       areq->iv);
512	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
513	if (err) {
514		if (err == -EINPROGRESS) {
515			sock_hold(sk);
516			err = -EIOCBQUEUED;
517			aead_reset_ctx(ctx);
518			goto unlock;
519		} else if (err == -EBADMSG) {
520			aead_put_sgl(sk);
521		}
522		goto free;
523	}
524	aead_put_sgl(sk);
525
526free:
527	list_for_each_entry(rsgl, &areq->list, list) {
528		af_alg_free_sg(&rsgl->sgl);
529		if (rsgl != &areq->first_rsgl)
530			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
531	}
532	if (areq->tsgl)
533		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
534	if (req)
535		sock_kfree_s(sk, req, reqlen);
536unlock:
537	aead_wmem_wakeup(sk);
538	release_sock(sk);
539	return err ? err : outlen;
540}
541
542static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
543{
544	struct sock *sk = sock->sk;
545	struct alg_sock *ask = alg_sk(sk);
546	struct aead_ctx *ctx = ask->private;
547	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
548	struct aead_sg_list *sgl = &ctx->tsgl;
549	struct aead_async_rsgl *last_rsgl = NULL;
550	struct aead_async_rsgl *rsgl, *tmp;
551	int err = -EINVAL;
552	unsigned long used = 0;
553	size_t outlen = 0;
554	size_t usedpages = 0;
555
556	lock_sock(sk);
557
558	/*
559	 * Please see documentation of aead_request_set_crypt for the
560	 * description of the AEAD memory structure expected from the caller.
561	 */
562
563	if (ctx->more) {
564		err = aead_wait_for_data(sk, flags);
565		if (err)
566			goto unlock;
567	}
568
569	/* data length provided by caller via sendmsg/sendpage */
570	used = ctx->used;
571
572	/*
573	 * Make sure sufficient data is present -- note, the same check is
574	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
575	 * shall provide information to the data sender that something is
576	 * wrong, but they are irrelevant for maintaining kernel integrity.
577	 * We need this check here too in case user space decides to not honor
578	 * the error message in sendmsg/sendpage and still call recvmsg. This
579	 * check here protects the kernel integrity.
580	 */
581	if (!aead_sufficient_data(ctx))
582		goto unlock;
583
584	/*
585	 * Calculate the minimum output buffer size holding the result of the
586	 * cipher operation. When encrypting data, the receiving buffer is
587	 * larger by the tag length compared to the input buffer as the
588	 * encryption operation generates the tag. For decryption, the input
589	 * buffer provides the tag which is consumed resulting in only the
590	 * plaintext without a buffer for the tag returned to the caller.
591	 */
592	if (ctx->enc)
593		outlen = used + as;
594	else
595		outlen = used - as;
596
597	/*
598	 * The cipher operation input data is reduced by the associated data
599	 * length as this data is processed separately later on.
600	 */
601	used -= ctx->aead_assoclen;
602
603	/* convert iovecs of output buffers into scatterlists */
604	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
605		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
606				      (outlen - usedpages));
607
608		if (list_empty(&ctx->list)) {
609			rsgl = &ctx->first_rsgl;
610		} else {
611			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
612			if (unlikely(!rsgl)) {
613				err = -ENOMEM;
614				goto unlock;
615			}
616		}
617		rsgl->sgl.npages = 0;
618		list_add_tail(&rsgl->list, &ctx->list);
619
620		/* make one iovec available as scatterlist */
621		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
622		if (err < 0)
623			goto unlock;
624		usedpages += err;
625		/* chain the new scatterlist with previous one */
626		if (last_rsgl)
627			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
628
629		last_rsgl = rsgl;
630
631		iov_iter_advance(&msg->msg_iter, err);
632	}
633
634	/* ensure output buffer is sufficiently large */
635	if (usedpages < outlen) {
636		err = -EINVAL;
637		goto unlock;
638	}
639
640	sg_mark_end(sgl->sg + sgl->cur - 1);
641	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
642			       used, ctx->iv);
643	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
644
645	err = af_alg_wait_for_completion(ctx->enc ?
646					 crypto_aead_encrypt(&ctx->aead_req) :
647					 crypto_aead_decrypt(&ctx->aead_req),
648					 &ctx->completion);
649
650	if (err) {
651		/* EBADMSG implies a valid cipher operation took place */
652		if (err == -EBADMSG)
653			aead_put_sgl(sk);
654
655		goto unlock;
656	}
657
658	aead_put_sgl(sk);
659	err = 0;
660
661unlock:
662	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
663		af_alg_free_sg(&rsgl->sgl);
664		list_del(&rsgl->list);
665		if (rsgl != &ctx->first_rsgl)
666			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
667	}
668	INIT_LIST_HEAD(&ctx->list);
669	aead_wmem_wakeup(sk);
670	release_sock(sk);
671
672	return err ? err : outlen;
673}
674
675static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
676			int flags)
677{
678	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
679		aead_recvmsg_async(sock, msg, flags) :
680		aead_recvmsg_sync(sock, msg, flags);
681}
682
683static unsigned int aead_poll(struct file *file, struct socket *sock,
684			      poll_table *wait)
685{
686	struct sock *sk = sock->sk;
687	struct alg_sock *ask = alg_sk(sk);
688	struct aead_ctx *ctx = ask->private;
689	unsigned int mask;
690
691	sock_poll_wait(file, sk_sleep(sk), wait);
692	mask = 0;
693
694	if (!ctx->more)
695		mask |= POLLIN | POLLRDNORM;
696
697	if (aead_writable(sk))
698		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
699
700	return mask;
701}
702
703static struct proto_ops algif_aead_ops = {
704	.family		=	PF_ALG,
705
706	.connect	=	sock_no_connect,
707	.socketpair	=	sock_no_socketpair,
708	.getname	=	sock_no_getname,
709	.ioctl		=	sock_no_ioctl,
710	.listen		=	sock_no_listen,
711	.shutdown	=	sock_no_shutdown,
712	.getsockopt	=	sock_no_getsockopt,
713	.mmap		=	sock_no_mmap,
714	.bind		=	sock_no_bind,
715	.accept		=	sock_no_accept,
716	.setsockopt	=	sock_no_setsockopt,
717
718	.release	=	af_alg_release,
719	.sendmsg	=	aead_sendmsg,
720	.sendpage	=	aead_sendpage,
721	.recvmsg	=	aead_recvmsg,
722	.poll		=	aead_poll,
723};
724
725static void *aead_bind(const char *name, u32 type, u32 mask)
726{
727	return crypto_alloc_aead(name, type, mask);
728}
729
730static void aead_release(void *private)
731{
732	crypto_free_aead(private);
733}
734
735static int aead_setauthsize(void *private, unsigned int authsize)
736{
737	return crypto_aead_setauthsize(private, authsize);
738}
739
740static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
741{
742	return crypto_aead_setkey(private, key, keylen);
743}
744
745static void aead_sock_destruct(struct sock *sk)
746{
747	struct alg_sock *ask = alg_sk(sk);
748	struct aead_ctx *ctx = ask->private;
749	unsigned int ivlen = crypto_aead_ivsize(
750				crypto_aead_reqtfm(&ctx->aead_req));
751
752	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
753	aead_put_sgl(sk);
754	sock_kzfree_s(sk, ctx->iv, ivlen);
755	sock_kfree_s(sk, ctx, ctx->len);
756	af_alg_release_parent(sk);
757}
758
759static int aead_accept_parent(void *private, struct sock *sk)
760{
761	struct aead_ctx *ctx;
762	struct alg_sock *ask = alg_sk(sk);
763	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
764	unsigned int ivlen = crypto_aead_ivsize(private);
765
766	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
767	if (!ctx)
768		return -ENOMEM;
769	memset(ctx, 0, len);
770
771	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
772	if (!ctx->iv) {
773		sock_kfree_s(sk, ctx, len);
774		return -ENOMEM;
775	}
776	memset(ctx->iv, 0, ivlen);
777
778	ctx->len = len;
779	ctx->used = 0;
780	ctx->more = 0;
781	ctx->merge = 0;
782	ctx->enc = 0;
783	ctx->tsgl.cur = 0;
784	ctx->aead_assoclen = 0;
785	af_alg_init_completion(&ctx->completion);
786	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
787	INIT_LIST_HEAD(&ctx->list);
788
789	ask->private = ctx;
790
791	aead_request_set_tfm(&ctx->aead_req, private);
792	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
793				  af_alg_complete, &ctx->completion);
794
795	sk->sk_destruct = aead_sock_destruct;
796
797	return 0;
798}
799
800static const struct af_alg_type algif_type_aead = {
801	.bind		=	aead_bind,
802	.release	=	aead_release,
803	.setkey		=	aead_setkey,
804	.setauthsize	=	aead_setauthsize,
805	.accept		=	aead_accept_parent,
806	.ops		=	&algif_aead_ops,
807	.name		=	"aead",
808	.owner		=	THIS_MODULE
809};
810
811static int __init algif_aead_init(void)
812{
813	return af_alg_register_type(&algif_type_aead);
814}
815
816static void __exit algif_aead_exit(void)
817{
818	int err = af_alg_unregister_type(&algif_type_aead);
819	BUG_ON(err);
820}
821
822module_init(algif_aead_init);
823module_exit(algif_aead_exit);
824MODULE_LICENSE("GPL");
825MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
826MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
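Both versions also accept payload through the aead_sendpage() path above, which splice() reaches without copying the data. The following is a rough sketch of that route, assuming the control messages selecting the operation, IV and AAD length have already been sent with sendmsg() and MSG_MORE, and that opfd is the accepted operation socket from the earlier example; send_zero_copy() is a hypothetical helper name, and SPLICE_F_MORE is dropped on the final chunk so the request can complete.

/*
 * Zero-copy input sketch, not part of the kernel file above.  The payload
 * pages are gifted into a pipe with vmsplice() and then spliced into the
 * AF_ALG operation socket, which ends up in aead_sendpage().
 */
#define _GNU_SOURCE		/* vmsplice(), splice(), SPLICE_F_* */
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int send_zero_copy(int opfd, void *buf, size_t len, int more)
{
	int pipes[2];
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	if (pipe(pipes))
		return -1;

	/* Hand the user pages to the pipe without copying them. */
	vmsplice(pipes[1], &iov, 1, SPLICE_F_GIFT);

	/*
	 * Feed the pipe into the socket.  SPLICE_F_MORE maps to MSG_MORE and
	 * keeps ctx->more set so further chunks can follow; omit it for the
	 * last chunk so a subsequent recvmsg() sees a complete request.
	 */
	splice(pipes[0], NULL, opfd, NULL, len, more ? SPLICE_F_MORE : 0);

	close(pipes[0]);
	close(pipes[1]);
	return 0;
}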