algif_aead.c (v6.8)

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * The following memory management concept is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendmsg (maybe with
 * MSG_SPLICE_PAGES). Filling up the TX SGL does not cause a crypto operation
 * -- the data will only be tracked by the kernel. Upon receipt of one recvmsg
 * call, the caller must provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together
 * with the RX SGL.
 */
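
/*
 * Illustrative sizing, assuming for example gcm(aes) with a 16-byte tag
 * and 16 bytes of AAD: to encrypt a 64-byte plaintext, user space queues
 * 80 bytes (AAD || PT) via sendmsg and must hand recvmsg at least 96
 * bytes of buffer space (AAD || CT || Tag); for decryption the tag
 * travels in the opposite direction and is consumed from the input.
 */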

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_tfm {
	struct crypto_aead *aead;
	struct crypto_sync_skcipher *null_tfm;
};

static inline bool aead_sufficient_data(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivsize = crypto_aead_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

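/*
 * Copy a region between SGLs using the default "null" skcipher, i.e. a
 * plain memcpy routed through the crypto API (presumably ecb(cipher_null));
 * no transformation is applied to the data.
 */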
static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
				struct scatterlist *src,
				struct scatterlist *dst, unsigned int len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

	skcipher_request_set_sync_tfm(skreq, null_tfm);
	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(skreq, src, dst, len, NULL);

	return crypto_skcipher_encrypt(skreq);
}

static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			 size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
	unsigned int i, as = crypto_aead_authsize(tfm);
	struct af_alg_async_req *areq;
	struct af_alg_tsgl *tsgl, *tmp;
	struct scatterlist *rsgl_src, *tsgl_src = NULL;
	int err = 0;
	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
	size_t processed = 0;		/* [in]  TX bufs to be consumed */

	if (!ctx->init || ctx->more) {
		err = af_alg_wait_for_data(sk, flags, 0);
		if (err)
			return err;
	}

	/*
	 * Data length provided by caller via sendmsg that has not yet been
	 * processed.
	 */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is also
	 * present in sendmsg. The checks in sendmsg inform the data sender
	 * that something is wrong, but they are irrelevant for maintaining
	 * kernel integrity. We need this check here too in case user space
	 * decides not to honor the error from sendmsg and still calls
	 * recvmsg; it is this check that protects kernel integrity.
	 */
	if (!aead_sufficient_data(sk))
		return -EINVAL;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag, which is consumed, so only the plaintext
	 * is returned to the caller, without buffer space for the tag.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_aead_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* Convert iovecs of output buffers into RX SGL. */
	err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
	if (err)
		goto free;

	/*
	 * Ensure the output buffer is sufficiently large. If the caller
	 * provides less buffer space, only as much input as fits the provided
	 * output buffer is processed. This allows AIO operation where the
	 * caller sent all data to be processed and each AIO call performs the
	 * operation on a different chunk of the input data.
	 */
	if (usedpages < outlen) {
		size_t less = outlen - usedpages;

		if (used < less) {
			err = -EINVAL;
			goto free;
		}
		used -= less;
		outlen -= less;
	}

	processed = used + ctx->aead_assoclen;
	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
		for (i = 0; i < tsgl->cur; i++) {
			struct scatterlist *process_sg = tsgl->sg + i;

			if (!(process_sg->length) || !sg_page(process_sg))
				continue;
			tsgl_src = process_sg;
			break;
		}
		if (tsgl_src)
			break;
	}
	if (processed && !tsgl_src) {
		err = -EFAULT;
		goto free;
	}

	/*
	 * Copy the AAD from source to destination.
	 *
	 * The AAD is copied to the destination buffer without change. Even
	 * when user space uses an in-place cipher operation, the kernel will
	 * copy the data, as it cannot see whether such an in-place operation
	 * was requested.
	 *
	 * To ensure efficiency, the following implementation ensures that the
	 * ciphers are invoked to perform the crypto operation in-place. This
	 * is achieved by the memory management specified as follows.
	 */

	/* Use the RX SGL as source (and destination) for crypto op. */
	rsgl_src = areq->first_rsgl.sgl.sgt.sgl;

	if (ctx->enc) {
		/*
		 * Encryption operation - The in-place cipher operation is
		 * achieved by the following operation:
		 *
		 * TX SGL: AAD || PT
		 *	    |	   |
		 *	    | copy |
		 *	    v	   v
		 * RX SGL: AAD || PT || Tag
		 */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sgt.sgl,
					   processed);
		if (err)
			goto free;
		af_alg_pull_tsgl(sk, processed, NULL, 0);
	} else {
		/*
		 * Decryption operation - To achieve an in-place cipher
		 * operation, the following SGL structure is used:
		 *
		 * TX SGL: AAD || CT || Tag
		 *	    |	   |	 ^
		 *	    | copy |	 | Create SGL link.
		 *	    v	   v	 |
		 * RX SGL: AAD || CT ----+
		 */

		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sgt.sgl,
					   outlen);
		if (err)
			goto free;

		/* Create TX SGL for tag and chain it to RX SGL. */
		areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
						       processed - as);
		if (!areq->tsgl_entries)
			areq->tsgl_entries = 1;
		areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
							 areq->tsgl_entries),
					  GFP_KERNEL);
		if (!areq->tsgl) {
			err = -ENOMEM;
			goto free;
		}
		sg_init_table(areq->tsgl, areq->tsgl_entries);

		/* Release TX SGL, except for tag data and reassign tag data. */
		af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);

		/* Chain the areq TX SGL holding the tag with the RX SGL. */
		if (usedpages) {
			/* RX SGL present */
			struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
			struct scatterlist *sg = sgl_prev->sgt.sgl;

			sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
			sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
		} else
			/* no RX SGL present (e.g. authentication only) */
			rsgl_src = areq->tsgl;
	}


	/* Initialize the crypto operation */
	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
			       areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember output size that will be generated. */
		areq->outlen = outlen;

		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_SLEEP,
					  af_alg_async_cb, areq);
		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
				 crypto_aead_decrypt(&areq->cra_u.aead_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS)
			return -EIOCBQUEUED;

		sock_put(sk);
	} else {
		/* Synchronous operation */
		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_SLEEP |
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
				crypto_aead_encrypt(&areq->cra_u.aead_req) :
				crypto_aead_decrypt(&areq->cra_u.aead_req),
				&ctx->wait);
	}


free:
	af_alg_free_resources(areq);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _aead_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED, which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, it must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.recvmsg	=	aead_recvmsg,
	.poll		=	af_alg_poll,
};

static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (!atomic_read(&ask->nokey_refcnt))
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	atomic_dec(&pask->nokey_refcnt);
	atomic_set(&ask->nokey_refcnt, 0);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_sendmsg(sock, msg, size);
}

static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_aead_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg_nokey,
	.recvmsg	=	aead_recvmsg_nokey,
	.poll		=	af_alg_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	struct aead_tfm *tfm;
	struct crypto_aead *aead;
	struct crypto_sync_skcipher *null_tfm;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	aead = crypto_alloc_aead(name, type, mask);
	if (IS_ERR(aead)) {
		kfree(tfm);
		return ERR_CAST(aead);
	}

	null_tfm = crypto_get_default_null_skcipher();
	if (IS_ERR(null_tfm)) {
		crypto_free_aead(aead);
		kfree(tfm);
		return ERR_CAST(null_tfm);
	}

	tfm->aead = aead;
	tfm->null_tfm = null_tfm;

	return tfm;
}

static void aead_release(void *private)
{
	struct aead_tfm *tfm = private;

	crypto_free_aead(tfm->aead);
	crypto_put_default_null_skcipher();
	kfree(tfm);
}

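/*
 * Reached via setsockopt(SOL_ALG, ALG_SET_AEAD_AUTHSIZE) on the tfm
 * socket; authsize is the tag length in bytes (see .setauthsize below).
 */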
static int aead_setauthsize(void *private, unsigned int authsize)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setauthsize(tfm->aead, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setkey(tfm->aead, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivlen = crypto_aead_ivsize(tfm);

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_tfm *tfm = private;
	struct crypto_aead *aead = tfm->aead;
	unsigned int len = sizeof(*ctx);
	unsigned int ivlen = crypto_aead_ivsize(aead);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_tfm *tfm = private;

	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return aead_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.accept_nokey	=	aead_accept_parent_nokey,
	.ops		=	&algif_aead_ops,
	.ops_nokey	=	&algif_aead_ops_nokey,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
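
For reference, here is a minimal user-space sketch of driving this interface, assuming a "gcm(aes)" AEAD is available; the all-zero key, IV, and payload are placeholders and all error handling is omitted:

#include <linux/if_alg.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",	/* assumed to be available */
	};
	unsigned char key[16] = { 0 };		/* placeholder key */
	unsigned char buf[256] = { 0 };		/* AAD || PT on input */
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(16) + CMSG_SPACE(4)] = { 0 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *iv;
	struct iovec iov;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* Select the operation: encryption. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* 12-byte GCM nonce (placeholder value). */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*iv) + 12);
	iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	iv->ivlen = 12;
	memset(iv->iv, 0, 12);

	/* The first 16 bytes of the payload are AAD. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = 16;

	/* Fill the TX SGL with AAD || PT: 16 + 64 bytes. */
	iov.iov_base = buf;
	iov.iov_len = 80;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	sendmsg(opfd, &msg, 0);

	/* recvmsg/read returns AAD || CT || 16-byte tag = 96 bytes. */
	read(opfd, buf, 96);

	close(opfd);
	close(tfmfd);
	return 0;
}

For decryption, ALG_SET_OP carries ALG_OP_DECRYPT and the tag is appended to the input (AAD || CT || tag), matching the SGL diagrams in _aead_recvmsg() above; a bad tag surfaces as -EBADMSG from recvmsg.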
algif_aead.c (v4.10.11)

/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};
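
/*
 * Note the TX list is bounded here: once sgl->cur reaches ALG_MAX_PAGES,
 * sendmsg()/sendpage() below fail with -E2BIG.
 */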

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	unsigned int tsgls;
	char iv[];
};

struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
		((char *)req + sizeof(struct aead_request) + \
		 crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
	sizeof(struct aead_request)
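
/*
 * So the buffer allocated in aead_recvmsg_async() is laid out as:
 *
 *   struct aead_request || tfm request context (crypto_aead_reqsize())
 *   || struct aead_async_req || IV (crypto_aead_ivsize())
 *
 * and GET_ASYM_REQ() skips the first two parts to reach the
 * aead_async_req whose trailing iv[] holds the per-request IV copy.
 */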

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct sock *sk = _req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_request *req = aead_request_cast(_req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -ENOMEM;
	unsigned long used;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	if (!aead_sufficient_data(ctx))
		goto unlock;

	used = ctx->used;
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, sk);
	used -= ctx->aead_assoclen;

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk,
				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;

		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}

static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL;
	struct aead_async_rsgl *rsgl, *tmp;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);

	/*
	 * Please see documentation of aead_request_set_crypt for the
	 * description of the AEAD memory structure expected from the caller.
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	/* data length provided by caller via sendmsg/sendpage */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * inform the data sender that something is wrong, but they are
	 * irrelevant for maintaining kernel integrity. We need this check
	 * here too in case user space decides not to honor the error from
	 * sendmsg/sendpage and still calls recvmsg; it is this check that
	 * protects kernel integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag, which is consumed, so only the plaintext
	 * is returned to the caller, without buffer space for the tag.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* convert iovecs of output buffers into scatterlists */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");