/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
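
/*
 * Illustrative sketch (not part of this file): minimal user-space setup of
 * an AEAD socket via AF_ALG. The algorithm name "gcm(aes)", the all-zero
 * 16-byte key and the 16-byte tag size are assumptions for the example;
 * error handling is omitted.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	int aead_socket_example(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "aead",
 *			.salg_name   = "gcm(aes)",
 *		};
 *		uint8_t key[16] = { 0 };	// example key, all zero
 *		int tfmfd, opfd;
 *
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		// Set the key and tag size on the tfm socket ...
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *		// ... then accept() an operation socket for sendmsg()/read().
 *		opfd = accept(tfmfd, NULL, 0);
 *		return opfd;
 *	}
 */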

#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_ctx {
	struct aead_sg_list tsgl;
	/*
	 * RSGL_MAX_ENTRIES is an artificial limit: at most, user space can
	 * cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
	 * pages.
	 */
#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
	struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};
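
/*
 * Worked example of the bound above (assuming ALG_MAX_PAGES == 16 and a
 * 4 KiB PAGE_SIZE, which are assumptions, not guarantees): user space can
 * cause at most 16 * 16 = 256 pages, i.e. 1 MiB, to be allocated for the
 * RX side of one socket.
 */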

static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	return ctx->used >= ctx->aead_assoclen + as;
}
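
/*
 * Worked example for the check above (GCM with a 16-byte tag and 8 bytes of
 * associated data are assumptions for illustration): the cipher operation
 * can only proceed once at least 8 + 16 = 24 bytes have been queued via
 * sendmsg()/sendpage().
 */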

static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	sg_init_table(sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}
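
/*
 * Illustrative sketch (not part of this file) of how user space drives the
 * sendmsg() path above: the operation type, IV and AAD length travel as
 * SOL_ALG control messages, while the AAD and plaintext travel as the
 * payload. The buffer sizes and "opfd" (an accept()ed operation socket)
 * are assumptions for the example; error handling is omitted.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	static int send_aead_request(int opfd, const void *aad, size_t aadlen,
 *				     const void *pt, size_t ptlen,
 *				     const void *iv, size_t ivlen)
 *	{
 *		char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4 + ivlen) +
 *			  CMSG_SPACE(4)];
 *		struct iovec iov[2] = {
 *			{ (void *)aad, aadlen }, { (void *)pt, ptlen },
 *		};
 *		struct msghdr msg = {
 *			.msg_iov = iov, .msg_iovlen = 2,
 *			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg;
 *		struct af_alg_iv *alg_iv;
 *
 *		memset(cbuf, 0, sizeof(cbuf));
 *
 *		// ALG_SET_OP: request encryption
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_OP;
 *		cmsg->cmsg_len = CMSG_LEN(4);
 *		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 *		// ALG_SET_IV: ivlen followed by the IV bytes
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_IV;
 *		cmsg->cmsg_len = CMSG_LEN(4 + ivlen);
 *		alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
 *		alg_iv->ivlen = ivlen;
 *		memcpy(alg_iv->iv, iv, ivlen);
 *
 *		// ALG_SET_AEAD_ASSOCLEN: length of the AAD prefix
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
 *		cmsg->cmsg_len = CMSG_LEN(4);
 *		*(__u32 *)CMSG_DATA(cmsg) = aadlen;
 *
 *		return sendmsg(opfd, &msg, 0);
 *	}
 */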

static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
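
/*
 * Illustrative sketch (not part of this file): the sendpage() path above is
 * typically reached from user space through splice()/vmsplice() for
 * zero-copy submission. The pipe setup and sizes are assumptions for the
 * example (requires _GNU_SOURCE and <fcntl.h>).
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 *	splice(pfd[0], NULL, opfd, NULL, len, 0);
 */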

static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	unsigned int i = 0;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;
	unsigned int cnt = 0;

	/* Limit number of IOV blocks to be accessed below */
	if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
		return -ENOMSG;

	lock_sock(sk);

	/*
	 * AEAD memory structure: For encryption, the tag is appended to the
	 * ciphertext, which implies that the memory allocated for the
	 * ciphertext must be increased by the tag length. For decryption, the
	 * tag is expected to be concatenated to the ciphertext. The plaintext
	 * therefore has a memory size of the ciphertext minus the tag length.
	 *
	 * The memory layout for the cipher operation is as follows:
	 * AEAD encryption input:  assoc data || plaintext
	 * AEAD encryption output: ciphertext || auth tag
	 * AEAD decryption input:  assoc data || ciphertext || auth tag
	 * AEAD decryption output: plaintext
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * inform the data sender that something is wrong, but they are
	 * irrelevant to maintaining the kernel integrity. We need this check
	 * here too in case user space decides to not honor the error returned
	 * by sendmsg/sendpage and still call recvmsg. This check here protects
	 * the kernel integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	outlen = used;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

	/* convert iovecs of output buffers into scatterlists */
	while (iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
				     seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (cnt)
			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);

		/* we do not need more iovecs as we have sufficient memory */
		if (outlen <= usedpages)
			break;
		iov_iter_advance(&msg->msg_iter, err);
		cnt++;
	}

	err = -EINVAL;
	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen)
		goto unlock;

	sg_mark_end(sgl->sg + sgl->cur - 1);

	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);
		goto unlock;
	}

	aead_put_sgl(sk);

	err = 0;

unlock:
	for (i = 0; i < cnt; i++)
		af_alg_free_sg(&ctx->rsgl[i]);

	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}
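
/*
 * Worked example for the size bookkeeping above (a 16-byte tag, 8 bytes of
 * AAD and 32 bytes of plaintext are assumptions): for encryption the caller
 * queues assoclen + ptlen + taglen = 8 + 32 + 16 = 56 bytes via sendmsg(),
 * so outlen = 56, the cipher operation length passed to the request is
 * 56 - 8 - 16 = 32, and the recvmsg() output buffers must provide at least
 * 56 bytes for assoc data, ciphertext and tag.
 */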

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The following concept of the memory management is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together
 * with the RX SGL release.
 */
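
/*
 * Illustrative flow sketch for the concept above (the sizes are assumptions:
 * 8 bytes AAD, 32 bytes plaintext, 16-byte tag). An encryption round trip
 * from user space proceeds as
 *
 *	sendmsg(opfd, AAD || PT, 40 bytes)  -> queued in the TX SGL only
 *	recvmsg(opfd, buf, 56 bytes)        -> RX SGL built from buf,
 *	                                       cipher request allocated,
 *	                                       AAD || CT || tag returned
 *
 * No cipher operation happens before the recvmsg() call.
 */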

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_tfm {
	struct crypto_aead *aead;
	struct crypto_skcipher *null_tfm;
};

static inline bool aead_sufficient_data(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and, in case of decryption, the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
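
/*
 * Worked example for the bound above (8 bytes of AAD and a 16-byte tag are
 * assumptions): encryption may proceed once 8 bytes have been queued,
 * decryption only once 8 + 16 = 24 bytes have been queued.
 */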

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivsize = crypto_aead_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
				struct scatterlist *src,
				struct scatterlist *dst, unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

	skcipher_request_set_tfm(skreq, null_tfm);
	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      NULL, NULL);
	skcipher_request_set_crypt(skreq, src, dst, len, NULL);

	return crypto_skcipher_encrypt(skreq);
}

static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			 size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	struct crypto_skcipher *null_tfm = aeadc->null_tfm;
	unsigned int i, as = crypto_aead_authsize(tfm);
	struct af_alg_async_req *areq;
	struct af_alg_tsgl *tsgl, *tmp;
	struct scatterlist *rsgl_src, *tsgl_src = NULL;
	int err = 0;
	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
	size_t processed = 0;		/* [in]  TX bufs to be consumed */

	if (!ctx->used) {
		err = af_alg_wait_for_data(sk, flags);
		if (err)
			return err;
	}

	/*
	 * Data length provided by caller via sendmsg/sendpage that has not
	 * yet been processed.
	 */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * inform the data sender that something is wrong, but they are
	 * irrelevant to maintaining the kernel integrity. We need this check
	 * here too in case user space decides to not honor the error returned
	 * by sendmsg/sendpage and still call recvmsg. This check here protects
	 * the kernel integrity.
	 */
	if (!aead_sufficient_data(sk))
		return -EINVAL;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag, which is consumed, so only the plaintext
	 * without the tag is returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_aead_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* convert iovecs of output buffers into RX SGL */
	err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
	if (err)
		goto free;

	/*
	 * Ensure the output buffer is sufficiently large. If the caller
	 * provides less buffer space, only use the relative required input
	 * size. This allows AIO operation where the caller sent all data to
	 * be processed and the AIO operation performs the operation on the
	 * different chunks of the input data.
	 */
	if (usedpages < outlen) {
		size_t less = outlen - usedpages;

		if (used < less) {
			err = -EINVAL;
			goto free;
		}
		used -= less;
		outlen -= less;
	}

	processed = used + ctx->aead_assoclen;
	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
		for (i = 0; i < tsgl->cur; i++) {
			struct scatterlist *process_sg = tsgl->sg + i;

			if (!(process_sg->length) || !sg_page(process_sg))
				continue;
			tsgl_src = process_sg;
			break;
		}
		if (tsgl_src)
			break;
	}
	if (processed && !tsgl_src) {
		err = -EFAULT;
		goto free;
	}

	/*
	 * Copy of AAD from source to destination
	 *
	 * The AAD is copied to the destination buffer without change. Even
	 * when user space uses an in-place cipher operation, the kernel will
	 * copy the data as it does not see whether such an in-place operation
	 * is initiated.
	 *
	 * To ensure efficiency, the following implementation ensures that the
	 * ciphers are invoked to perform the crypto operation in-place. This
	 * is achieved by the memory management described below.
	 */

	/* Use the RX SGL as source (and destination) for crypto op. */
	rsgl_src = areq->first_rsgl.sgl.sg;

	if (ctx->enc) {
		/*
		 * Encryption operation - The in-place cipher operation is
		 * achieved by the following operation:
		 *
		 * TX SGL: AAD || PT
		 *          |      |
		 *          | copy |
		 *          v      v
		 * RX SGL: AAD || PT || Tag
		 */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, processed);
		if (err)
			goto free;
		af_alg_pull_tsgl(sk, processed, NULL, 0);
	} else {
		/*
		 * Decryption operation - To achieve an in-place cipher
		 * operation, the following SGL structure is used:
		 *
		 * TX SGL: AAD || CT || Tag
		 *          |      |     ^
		 *          | copy |     | Create SGL link.
		 *          v      v     |
		 * RX SGL: AAD || CT ----+
		 */

		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, outlen);
		if (err)
			goto free;

		/* Create TX SGL for tag and chain it to RX SGL. */
		areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
						       processed - as);
		if (!areq->tsgl_entries)
			areq->tsgl_entries = 1;
		areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
					      areq->tsgl_entries,
					  GFP_KERNEL);
		if (!areq->tsgl) {
			err = -ENOMEM;
			goto free;
		}
		sg_init_table(areq->tsgl, areq->tsgl_entries);

		/* Release TX SGL, except for tag data and reassign tag data. */
		af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);

		/* chain the areq TX SGL holding the tag with RX SGL */
		if (usedpages) {
			/* RX SGL present */
			struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;

			sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
			sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
				 areq->tsgl);
		} else
			/* no RX SGL present (e.g. authentication only) */
			rsgl_src = areq->tsgl;
	}

	/* Initialize the crypto operation */
	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
			       areq->first_rsgl.sgl.sg, used, ctx->iv);
	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember output size that will be generated. */
		areq->outlen = outlen;

		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  af_alg_async_cb, areq);
		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
				 crypto_aead_decrypt(&areq->cra_u.aead_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS || err == -EBUSY)
			return -EIOCBQUEUED;

		sock_put(sk);
	} else {
		/* Synchronous operation */
		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
				crypto_aead_encrypt(&areq->cra_u.aead_req) :
				crypto_aead_decrypt(&areq->cra_u.aead_req),
				&ctx->wait);
	}

free:
	af_alg_free_resources(areq);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _aead_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, it must issue separate
		 * AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}
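
/*
 * Illustrative sketch (not part of this file): a synchronous user-space
 * caller retrieves the result with a plain read()/recvmsg() on the
 * operation socket. The 56-byte size matches the worked example in the
 * header comment and is an assumption.
 *
 *	uint8_t out[56];
 *	ssize_t n = read(opfd, out, sizeof(out));	// AAD || CT || tag
 */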

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	af_alg_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	af_alg_poll,
};

static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t size)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_sendmsg(sock, msg, size);
}

static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
				   int offset, size_t size, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return af_alg_sendpage(sock, page, offset, size, flags);
}

static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t ignored, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_aead_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg_nokey,
	.sendpage	=	aead_sendpage_nokey,
	.recvmsg	=	aead_recvmsg_nokey,
	.poll		=	af_alg_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	struct aead_tfm *tfm;
	struct crypto_aead *aead;
	struct crypto_skcipher *null_tfm;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	aead = crypto_alloc_aead(name, type, mask);
	if (IS_ERR(aead)) {
		kfree(tfm);
		return ERR_CAST(aead);
	}

	null_tfm = crypto_get_default_null_skcipher();
	if (IS_ERR(null_tfm)) {
		crypto_free_aead(aead);
		kfree(tfm);
		return ERR_CAST(null_tfm);
	}

	tfm->aead = aead;
	tfm->null_tfm = null_tfm;

	return tfm;
}

static void aead_release(void *private)
{
	struct aead_tfm *tfm = private;

	crypto_free_aead(tfm->aead);
	crypto_put_default_null_skcipher();
	kfree(tfm);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setauthsize(tfm->aead, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setkey(tfm->aead, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivlen = crypto_aead_ivsize(tfm);

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_tfm *tfm = private;
	struct crypto_aead *aead = tfm->aead;
	unsigned int len = sizeof(*ctx);
	unsigned int ivlen = crypto_aead_ivsize(aead);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	ctx->used = 0;
	atomic_set(&ctx->rcvused, 0);
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->aead_assoclen = 0;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_tfm *tfm = private;

	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return aead_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.accept_nokey	=	aead_accept_parent_nokey,
	.ops		=	&algif_aead_ops,
	.ops_nokey	=	&algif_aead_ops_nokey,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");