1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3
4#include <linux/skmsg.h>
5#include <linux/skbuff.h>
6#include <linux/scatterlist.h>
7
8#include <net/sock.h>
9#include <net/tcp.h>
10#include <net/tls.h>
11#include <trace/events/sock.h>
12
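/* Check whether the element just before sg.end may be extended with new
 * data: the ring must already hold data and @elem_first_coalesce must
 * still lie before sg.end, taking ring wrap-around into account.
 */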
13static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
14{
15 if (msg->sg.end > msg->sg.start &&
16 elem_first_coalesce < msg->sg.end)
17 return true;
18
19 if (msg->sg.end < msg->sg.start &&
20 (elem_first_coalesce > msg->sg.start ||
21 elem_first_coalesce < msg->sg.end))
22 return true;
23
24 return false;
25}
26
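/* Grow @msg so that it holds @len bytes in total, allocating from the
 * socket's page_frag. New bytes are coalesced into the last element
 * when sk_msg_try_coalesce_ok() allows it; on memory failure the
 * message is trimmed back to its original size.
 */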
27int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
28 int elem_first_coalesce)
29{
30 struct page_frag *pfrag = sk_page_frag(sk);
31 u32 osize = msg->sg.size;
32 int ret = 0;
33
34 len -= msg->sg.size;
35 while (len > 0) {
36 struct scatterlist *sge;
37 u32 orig_offset;
38 int use, i;
39
40 if (!sk_page_frag_refill(sk, pfrag)) {
41 ret = -ENOMEM;
42 goto msg_trim;
43 }
44
45 orig_offset = pfrag->offset;
46 use = min_t(int, len, pfrag->size - orig_offset);
47 if (!sk_wmem_schedule(sk, use)) {
48 ret = -ENOMEM;
49 goto msg_trim;
50 }
51
52 i = msg->sg.end;
53 sk_msg_iter_var_prev(i);
54 sge = &msg->sg.data[i];
55
56 if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
57 sg_page(sge) == pfrag->page &&
58 sge->offset + sge->length == orig_offset) {
59 sge->length += use;
60 } else {
61 if (sk_msg_full(msg)) {
62 ret = -ENOSPC;
63 break;
64 }
65
66 sge = &msg->sg.data[msg->sg.end];
67 sg_unmark_end(sge);
68 sg_set_page(sge, pfrag->page, use, orig_offset);
69 get_page(pfrag->page);
70 sk_msg_iter_next(msg, end);
71 }
72
73 sk_mem_charge(sk, use);
74 msg->sg.size += use;
75 pfrag->offset += use;
76 len -= use;
77 }
78
79 return ret;
80
81msg_trim:
82 sk_msg_trim(sk, msg, osize);
83 return ret;
84}
85EXPORT_SYMBOL_GPL(sk_msg_alloc);
86
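/* Share @len bytes starting at offset @off of @src with @dst by adding
 * references to the same pages (no data is copied). The socket is
 * charged for the shared bytes; -ENOSPC is returned if @dst runs out of
 * elements or @src runs out of data.
 */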
87int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
88 u32 off, u32 len)
89{
90 int i = src->sg.start;
91 struct scatterlist *sge = sk_msg_elem(src, i);
92 struct scatterlist *sgd = NULL;
93 u32 sge_len, sge_off;
94
95 while (off) {
96 if (sge->length > off)
97 break;
98 off -= sge->length;
99 sk_msg_iter_var_next(i);
100 if (i == src->sg.end && off)
101 return -ENOSPC;
102 sge = sk_msg_elem(src, i);
103 }
104
105 while (len) {
106 sge_len = sge->length - off;
107 if (sge_len > len)
108 sge_len = len;
109
110 if (dst->sg.end)
111 sgd = sk_msg_elem(dst, dst->sg.end - 1);
112
113 if (sgd &&
114 (sg_page(sge) == sg_page(sgd)) &&
115 (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
116 sgd->length += sge_len;
117 dst->sg.size += sge_len;
118 } else if (!sk_msg_full(dst)) {
119 sge_off = sge->offset + off;
120 sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
121 } else {
122 return -ENOSPC;
123 }
124
125 off = 0;
126 len -= sge_len;
127 sk_mem_charge(sk, sge_len);
128 sk_msg_iter_var_next(i);
129 if (i == src->sg.end && len)
130 return -ENOSPC;
131 sge = sk_msg_elem(src, i);
132 }
133
134 return 0;
135}
136EXPORT_SYMBOL_GPL(sk_msg_clone);
137
138void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
139{
140 int i = msg->sg.start;
141
142 do {
143 struct scatterlist *sge = sk_msg_elem(msg, i);
144
145 if (bytes < sge->length) {
146 sge->length -= bytes;
147 sge->offset += bytes;
148 sk_mem_uncharge(sk, bytes);
149 break;
150 }
151
152 sk_mem_uncharge(sk, sge->length);
153 bytes -= sge->length;
154 sge->length = 0;
155 sge->offset = 0;
156 sk_msg_iter_var_next(i);
157 } while (bytes && i != msg->sg.end);
158 msg->sg.start = i;
159}
160EXPORT_SYMBOL_GPL(sk_msg_return_zero);
161
162void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
163{
164 int i = msg->sg.start;
165
166 do {
167 struct scatterlist *sge = &msg->sg.data[i];
168 int uncharge = (bytes < sge->length) ? bytes : sge->length;
169
170 sk_mem_uncharge(sk, uncharge);
171 bytes -= uncharge;
172 sk_msg_iter_var_next(i);
173 } while (i != msg->sg.end);
174}
175EXPORT_SYMBOL_GPL(sk_msg_return);
176
177static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
178 bool charge)
179{
180 struct scatterlist *sge = sk_msg_elem(msg, i);
181 u32 len = sge->length;
182
183 /* When the skb owns the memory we free it from the consume_skb() path. */
184 if (!msg->skb) {
185 if (charge)
186 sk_mem_uncharge(sk, len);
187 put_page(sg_page(sge));
188 }
189 memset(sge, 0, sizeof(*sge));
190 return len;
191}
192
193static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
194 bool charge)
195{
196 struct scatterlist *sge = sk_msg_elem(msg, i);
197 int freed = 0;
198
199 while (msg->sg.size) {
200 msg->sg.size -= sge->length;
201 freed += sk_msg_free_elem(sk, msg, i, charge);
202 sk_msg_iter_var_next(i);
203 sk_msg_check_to_free(msg, i, msg->sg.size);
204 sge = sk_msg_elem(msg, i);
205 }
206 consume_skb(msg->skb);
207 sk_msg_init(msg);
208 return freed;
209}
210
211int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
212{
213 return __sk_msg_free(sk, msg, msg->sg.start, false);
214}
215EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
216
217int sk_msg_free(struct sock *sk, struct sk_msg *msg)
218{
219 return __sk_msg_free(sk, msg, msg->sg.start, true);
220}
221EXPORT_SYMBOL_GPL(sk_msg_free);
222
223static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
224 u32 bytes, bool charge)
225{
226 struct scatterlist *sge;
227 u32 i = msg->sg.start;
228
229 while (bytes) {
230 sge = sk_msg_elem(msg, i);
231 if (!sge->length)
232 break;
233 if (bytes < sge->length) {
234 if (charge)
235 sk_mem_uncharge(sk, bytes);
236 sge->length -= bytes;
237 sge->offset += bytes;
238 msg->sg.size -= bytes;
239 break;
240 }
241
242 msg->sg.size -= sge->length;
243 bytes -= sge->length;
244 sk_msg_free_elem(sk, msg, i, charge);
245 sk_msg_iter_var_next(i);
246 sk_msg_check_to_free(msg, i, bytes);
247 }
248 msg->sg.start = i;
249}
250
251void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
252{
253 __sk_msg_free_partial(sk, msg, bytes, true);
254}
255EXPORT_SYMBOL_GPL(sk_msg_free_partial);
256
257void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
258 u32 bytes)
259{
260 __sk_msg_free_partial(sk, msg, bytes, false);
261}
262
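/* Shrink @msg to @len bytes by trimming from the tail: whole trailing
 * sg elements are freed first, then the last remaining element is
 * shortened and the curr/copybreak cursors are pulled back so they do
 * not point past the new end.
 */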
263void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
264{
265 int trim = msg->sg.size - len;
266 u32 i = msg->sg.end;
267
268 if (trim <= 0) {
269 WARN_ON(trim < 0);
270 return;
271 }
272
273 sk_msg_iter_var_prev(i);
274 msg->sg.size = len;
275 while (msg->sg.data[i].length &&
276 trim >= msg->sg.data[i].length) {
277 trim -= msg->sg.data[i].length;
278 sk_msg_free_elem(sk, msg, i, true);
279 sk_msg_iter_var_prev(i);
280 if (!trim)
281 goto out;
282 }
283
284 msg->sg.data[i].length -= trim;
285 sk_mem_uncharge(sk, trim);
286 /* Adjust copybreak if it falls into the trimmed part of last buf */
287 if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
288 msg->sg.copybreak = msg->sg.data[i].length;
289out:
290 sk_msg_iter_var_next(i);
291 msg->sg.end = i;
292
293 /* If we trim data a full sg elem before the curr pointer, update
294 * copybreak and curr so that any future copy operations
295 * start at the new copy location.
296 * However, trimmed data that has not yet been used in a copy op
297 * does not require an update.
298 */
299 if (!msg->sg.size) {
300 msg->sg.curr = msg->sg.start;
301 msg->sg.copybreak = 0;
302 } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
303 sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
304 sk_msg_iter_var_prev(i);
305 msg->sg.curr = i;
306 msg->sg.copybreak = msg->sg.data[i].length;
307 }
308}
309EXPORT_SYMBOL_GPL(sk_msg_trim);
310
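/* Zerocopy fill: pin user pages from @from and link them straight into
 * @msg's sg ring, up to @bytes. On failure the iterator is reverted;
 * the caller must trim @msg itself if the partially built message also
 * needs to be undone.
 */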
311int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
312 struct sk_msg *msg, u32 bytes)
313{
314 int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
315 const int to_max_pages = MAX_MSG_FRAGS;
316 struct page *pages[MAX_MSG_FRAGS];
317 ssize_t orig, copied, use, offset;
318
319 orig = msg->sg.size;
320 while (bytes > 0) {
321 i = 0;
322 maxpages = to_max_pages - num_elems;
323 if (maxpages == 0) {
324 ret = -EFAULT;
325 goto out;
326 }
327
328 copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
329 &offset);
330 if (copied <= 0) {
331 ret = -EFAULT;
332 goto out;
333 }
334
335 bytes -= copied;
336 msg->sg.size += copied;
337
338 while (copied) {
339 use = min_t(int, copied, PAGE_SIZE - offset);
340 sg_set_page(&msg->sg.data[msg->sg.end],
341 pages[i], use, offset);
342 sg_unmark_end(&msg->sg.data[msg->sg.end]);
343 sk_mem_charge(sk, use);
344
345 offset = 0;
346 copied -= use;
347 sk_msg_iter_next(msg, end);
348 num_elems++;
349 i++;
350 }
351 /* When zerocopy is mixed with sk_msg_*copy* operations we
352 * may have a copybreak set. In that case clear it and prefer
353 * the zerocopy remainder when possible.
354 */
355 msg->sg.copybreak = 0;
356 msg->sg.curr = msg->sg.end;
357 }
358out:
359 /* Revert iov_iter updates; msg will need to use 'trim' later if it
360 * also needs to be cleared.
361 */
362 if (ret)
363 iov_iter_revert(from, msg->sg.size - orig);
364 return ret;
365}
366EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
367
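/* Copying fill: copy up to @bytes from @from into buffers already
 * allocated in @msg, resuming at the curr element and copybreak offset
 * left behind by a previous call.
 */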
368int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
369 struct sk_msg *msg, u32 bytes)
370{
371 int ret = -ENOSPC, i = msg->sg.curr;
372 struct scatterlist *sge;
373 u32 copy, buf_size;
374 void *to;
375
376 do {
377 sge = sk_msg_elem(msg, i);
378 /* This is possible if a trim operation shrunk the buffer */
379 if (msg->sg.copybreak >= sge->length) {
380 msg->sg.copybreak = 0;
381 sk_msg_iter_var_next(i);
382 if (i == msg->sg.end)
383 break;
384 sge = sk_msg_elem(msg, i);
385 }
386
387 buf_size = sge->length - msg->sg.copybreak;
388 copy = (buf_size > bytes) ? bytes : buf_size;
389 to = sg_virt(sge) + msg->sg.copybreak;
390 msg->sg.copybreak += copy;
391 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
392 ret = copy_from_iter_nocache(to, copy, from);
393 else
394 ret = copy_from_iter(to, copy, from);
395 if (ret != copy) {
396 ret = -EFAULT;
397 goto out;
398 }
399 bytes -= copy;
400 if (!bytes)
401 break;
402 msg->sg.copybreak = 0;
403 sk_msg_iter_var_next(i);
404 } while (i != msg->sg.end);
405out:
406 msg->sg.curr = i;
407 return ret;
408}
409EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
410
411/* Receive sk_msg from psock->ingress_msg to @msg. */
412int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
413 int len, int flags)
414{
415 struct iov_iter *iter = &msg->msg_iter;
416 int peek = flags & MSG_PEEK;
417 struct sk_msg *msg_rx;
418 int i, copied = 0;
419
420 msg_rx = sk_psock_peek_msg(psock);
421 while (copied != len) {
422 struct scatterlist *sge;
423
424 if (unlikely(!msg_rx))
425 break;
426
427 i = msg_rx->sg.start;
428 do {
429 struct page *page;
430 int copy;
431
432 sge = sk_msg_elem(msg_rx, i);
433 copy = sge->length;
434 page = sg_page(sge);
435 if (copied + copy > len)
436 copy = len - copied;
437 copy = copy_page_to_iter(page, sge->offset, copy, iter);
438 if (!copy) {
439 copied = copied ? copied : -EFAULT;
440 goto out;
441 }
442
443 copied += copy;
444 if (likely(!peek)) {
445 sge->offset += copy;
446 sge->length -= copy;
447 if (!msg_rx->skb)
448 sk_mem_uncharge(sk, copy);
449 msg_rx->sg.size -= copy;
450
451 if (!sge->length) {
452 sk_msg_iter_var_next(i);
453 if (!msg_rx->skb)
454 put_page(page);
455 }
456 } else {
457 /* Let's not optimize the peek case: if copy_page_to_iter
458 * didn't copy the entire length, just break.
459 */
460 if (copy != sge->length)
461 goto out;
462 sk_msg_iter_var_next(i);
463 }
464
465 if (copied == len)
466 break;
467 } while ((i != msg_rx->sg.end) && !sg_is_last(sge));
468
469 if (unlikely(peek)) {
470 msg_rx = sk_psock_next_msg(psock, msg_rx);
471 if (!msg_rx)
472 break;
473 continue;
474 }
475
476 msg_rx->sg.start = i;
477 if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
478 msg_rx = sk_psock_dequeue_msg(psock);
479 kfree_sk_msg(msg_rx);
480 }
481 msg_rx = sk_psock_peek_msg(psock);
482 }
483out:
484 return copied;
485}
486EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
487
488bool sk_msg_is_readable(struct sock *sk)
489{
490 struct sk_psock *psock;
491 bool empty = true;
492
493 rcu_read_lock();
494 psock = sk_psock(sk);
495 if (likely(psock))
496 empty = list_empty(&psock->ingress_msg);
497 rcu_read_unlock();
498 return !empty;
499}
500EXPORT_SYMBOL_GPL(sk_msg_is_readable);
501
502static struct sk_msg *alloc_sk_msg(gfp_t gfp)
503{
504 struct sk_msg *msg;
505
506 msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
507 if (unlikely(!msg))
508 return NULL;
509 sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
510 return msg;
511}
512
513static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
514 struct sk_buff *skb)
515{
516 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
517 return NULL;
518
519 if (!sk_rmem_schedule(sk, skb, skb->truesize))
520 return NULL;
521
522 return alloc_sk_msg(GFP_KERNEL);
523}
524
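/* Map @len bytes of @skb starting at @off into @msg's sg ring, queue
 * the message on the psock ingress list and wake any reader through
 * sk_psock_data_ready().
 */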
525static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
526 u32 off, u32 len,
527 struct sk_psock *psock,
528 struct sock *sk,
529 struct sk_msg *msg)
530{
531 int num_sge, copied;
532
533 num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
534 if (num_sge < 0) {
535 /* skb linearize may fail with ENOMEM, but let's simply try again
536 * later if this happens. Under memory pressure we don't want to
537 * drop the skb. We need to linearize the skb so that the mapping
538 * in skb_to_sgvec cannot error.
539 */
540 if (skb_linearize(skb))
541 return -EAGAIN;
542
543 num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
544 if (unlikely(num_sge < 0))
545 return num_sge;
546 }
547
548 copied = len;
549 msg->sg.start = 0;
550 msg->sg.size = copied;
551 msg->sg.end = num_sge;
552 msg->skb = skb;
553
554 sk_psock_queue_msg(psock, msg);
555 sk_psock_data_ready(sk, psock);
556 return copied;
557}
558
559static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
560 u32 off, u32 len);
561
562static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
563 u32 off, u32 len)
564{
565 struct sock *sk = psock->sk;
566 struct sk_msg *msg;
567 int err;
568
569 /* If we are receiving on the same sock, skb->sk is already assigned;
570 * skip memory accounting and owner transition since it is already set
571 * correctly.
572 */
573 if (unlikely(skb->sk == sk))
574 return sk_psock_skb_ingress_self(psock, skb, off, len);
575 msg = sk_psock_create_ingress_msg(sk, skb);
576 if (!msg)
577 return -EAGAIN;
578
579 /* This will transition ownership of the data from the socket where
580 * the BPF program was run initiating the redirect to the socket
581 * we will eventually receive this data on. The data will be released
582 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been copied
583 * into user buffers.
584 */
585 skb_set_owner_r(skb, sk);
586 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
587 if (err < 0)
588 kfree(msg);
589 return err;
590}
591
592/* Puts an skb on the ingress queue of the socket already assigned to the
593 * skb. In this case we do not need to check memory limits or call skb_set_owner_r()
594 * because the skb is already accounted for here.
595 */
596static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
597 u32 off, u32 len)
598{
599 struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
600 struct sock *sk = psock->sk;
601 int err;
602
603 if (unlikely(!msg))
604 return -EAGAIN;
605 skb_set_owner_r(skb, sk);
606 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
607 if (err < 0)
608 kfree(msg);
609 return err;
610}
611
612static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
613 u32 off, u32 len, bool ingress)
614{
615 int err = 0;
616
617 if (!ingress) {
618 if (!sock_writeable(psock->sk))
619 return -EAGAIN;
620 return skb_send_sock(psock->sk, skb, off, len);
621 }
622 skb_get(skb);
623 err = sk_psock_skb_ingress(psock, skb, off, len);
624 if (err < 0)
625 kfree_skb(skb);
626 return err;
627}
628
629static void sk_psock_skb_state(struct sk_psock *psock,
630 struct sk_psock_work_state *state,
631 int len, int off)
632{
633 spin_lock_bh(&psock->ingress_lock);
634 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
635 state->len = len;
636 state->off = off;
637 }
638 spin_unlock_bh(&psock->ingress_lock);
639}
640
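/* Delayed-work handler that drains psock->ingress_skb: each skb is
 * either sent out (egress) or turned into an ingress sk_msg. Partial
 * progress is stashed in work_state so that an -EAGAIN can be retried
 * on a later run.
 */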
641static void sk_psock_backlog(struct work_struct *work)
642{
643 struct delayed_work *dwork = to_delayed_work(work);
644 struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
645 struct sk_psock_work_state *state = &psock->work_state;
646 struct sk_buff *skb = NULL;
647 u32 len = 0, off = 0;
648 bool ingress;
649 int ret;
650
651 mutex_lock(&psock->work_mutex);
652 if (unlikely(state->len)) {
653 len = state->len;
654 off = state->off;
655 }
656
657 while ((skb = skb_peek(&psock->ingress_skb))) {
658 len = skb->len;
659 off = 0;
660 if (skb_bpf_strparser(skb)) {
661 struct strp_msg *stm = strp_msg(skb);
662
663 off = stm->offset;
664 len = stm->full_len;
665 }
666 ingress = skb_bpf_ingress(skb);
667 skb_bpf_redirect_clear(skb);
668 do {
669 ret = -EIO;
670 if (!sock_flag(psock->sk, SOCK_DEAD))
671 ret = sk_psock_handle_skb(psock, skb, off,
672 len, ingress);
673 if (ret <= 0) {
674 if (ret == -EAGAIN) {
675 sk_psock_skb_state(psock, state, len, off);
676
677 /* Delay slightly to prioritize any
678 * other work that might be here.
679 */
680 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
681 schedule_delayed_work(&psock->work, 1);
682 goto end;
683 }
684 /* Hard errors break pipe and stop xmit. */
685 sk_psock_report_error(psock, ret ? -ret : EPIPE);
686 sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
687 goto end;
688 }
689 off += ret;
690 len -= ret;
691 } while (len);
692
693 skb = skb_dequeue(&psock->ingress_skb);
694 kfree_skb(skb);
695 }
696end:
697 mutex_unlock(&psock->work_mutex);
698}
699
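/* Allocate a psock for @sk and attach it as sk_user_data. Returns
 * ERR_PTR(-EINVAL) if the socket already has a ULP, ERR_PTR(-EBUSY) if
 * sk_user_data is taken, or ERR_PTR(-ENOMEM) on allocation failure.
 */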
700struct sk_psock *sk_psock_init(struct sock *sk, int node)
701{
702 struct sk_psock *psock;
703 struct proto *prot;
704
705 write_lock_bh(&sk->sk_callback_lock);
706
707 if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
708 psock = ERR_PTR(-EINVAL);
709 goto out;
710 }
711
712 if (sk->sk_user_data) {
713 psock = ERR_PTR(-EBUSY);
714 goto out;
715 }
716
717 psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
718 if (!psock) {
719 psock = ERR_PTR(-ENOMEM);
720 goto out;
721 }
722
723 prot = READ_ONCE(sk->sk_prot);
724 psock->sk = sk;
725 psock->eval = __SK_NONE;
726 psock->sk_proto = prot;
727 psock->saved_unhash = prot->unhash;
728 psock->saved_destroy = prot->destroy;
729 psock->saved_close = prot->close;
730 psock->saved_write_space = sk->sk_write_space;
731
732 INIT_LIST_HEAD(&psock->link);
733 spin_lock_init(&psock->link_lock);
734
735 INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
736 mutex_init(&psock->work_mutex);
737 INIT_LIST_HEAD(&psock->ingress_msg);
738 spin_lock_init(&psock->ingress_lock);
739 skb_queue_head_init(&psock->ingress_skb);
740
741 sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
742 refcount_set(&psock->refcnt, 1);
743
744 __rcu_assign_sk_user_data_with_flags(sk, psock,
745 SK_USER_DATA_NOCOPY |
746 SK_USER_DATA_PSOCK);
747 sock_hold(sk);
748
749out:
750 write_unlock_bh(&sk->sk_callback_lock);
751 return psock;
752}
753EXPORT_SYMBOL_GPL(sk_psock_init);
754
755struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
756{
757 struct sk_psock_link *link;
758
759 spin_lock_bh(&psock->link_lock);
760 link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
761 list);
762 if (link)
763 list_del(&link->list);
764 spin_unlock_bh(&psock->link_lock);
765 return link;
766}
767
768static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
769{
770 struct sk_msg *msg, *tmp;
771
772 list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
773 list_del(&msg->list);
774 sk_msg_free(psock->sk, msg);
775 kfree(msg);
776 }
777}
778
779static void __sk_psock_zap_ingress(struct sk_psock *psock)
780{
781 struct sk_buff *skb;
782
783 while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
784 skb_bpf_redirect_clear(skb);
785 sock_drop(psock->sk, skb);
786 }
787 __sk_psock_purge_ingress_msg(psock);
788}
789
790static void sk_psock_link_destroy(struct sk_psock *psock)
791{
792 struct sk_psock_link *link, *tmp;
793
794 list_for_each_entry_safe(link, tmp, &psock->link, list) {
795 list_del(&link->list);
796 sk_psock_free_link(link);
797 }
798}
799
800void sk_psock_stop(struct sk_psock *psock)
801{
802 spin_lock_bh(&psock->ingress_lock);
803 sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
804 sk_psock_cork_free(psock);
805 spin_unlock_bh(&psock->ingress_lock);
806}
807
808static void sk_psock_done_strp(struct sk_psock *psock);
809
810static void sk_psock_destroy(struct work_struct *work)
811{
812 struct sk_psock *psock = container_of(to_rcu_work(work),
813 struct sk_psock, rwork);
814 /* No sk_callback_lock since already detached. */
815
816 sk_psock_done_strp(psock);
817
818 cancel_delayed_work_sync(&psock->work);
819 __sk_psock_zap_ingress(psock);
820 mutex_destroy(&psock->work_mutex);
821
822 psock_progs_drop(&psock->progs);
823
824 sk_psock_link_destroy(psock);
825 sk_psock_cork_free(psock);
826
827 if (psock->sk_redir)
828 sock_put(psock->sk_redir);
829 if (psock->sk_pair)
830 sock_put(psock->sk_pair);
831 sock_put(psock->sk);
832 kfree(psock);
833}
834
835void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
836{
837 write_lock_bh(&sk->sk_callback_lock);
838 sk_psock_restore_proto(sk, psock);
839 rcu_assign_sk_user_data(sk, NULL);
840 if (psock->progs.stream_parser)
841 sk_psock_stop_strp(sk, psock);
842 else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
843 sk_psock_stop_verdict(sk, psock);
844 write_unlock_bh(&sk->sk_callback_lock);
845
846 sk_psock_stop(psock);
847
848 INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
849 queue_rcu_work(system_wq, &psock->rwork);
850}
851EXPORT_SYMBOL_GPL(sk_psock_drop);
852
853static int sk_psock_map_verd(int verdict, bool redir)
854{
855 switch (verdict) {
856 case SK_PASS:
857 return redir ? __SK_REDIRECT : __SK_PASS;
858 case SK_DROP:
859 default:
860 break;
861 }
862
863 return __SK_DROP;
864}
865
866int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
867 struct sk_msg *msg)
868{
869 struct bpf_prog *prog;
870 int ret;
871
872 rcu_read_lock();
873 prog = READ_ONCE(psock->progs.msg_parser);
874 if (unlikely(!prog)) {
875 ret = __SK_PASS;
876 goto out;
877 }
878
879 sk_msg_compute_data_pointers(msg);
880 msg->sk = sk;
881 ret = bpf_prog_run_pin_on_cpu(prog, msg);
882 ret = sk_psock_map_verd(ret, msg->sk_redir);
883 psock->apply_bytes = msg->apply_bytes;
884 if (ret == __SK_REDIRECT) {
885 if (psock->sk_redir) {
886 sock_put(psock->sk_redir);
887 psock->sk_redir = NULL;
888 }
889 if (!msg->sk_redir) {
890 ret = __SK_DROP;
891 goto out;
892 }
893 psock->redir_ingress = sk_msg_to_ingress(msg);
894 psock->sk_redir = msg->sk_redir;
895 sock_hold(psock->sk_redir);
896 }
897out:
898 rcu_read_unlock();
899 return ret;
900}
901EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
902
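/* Forward @skb to the socket chosen by the BPF program: queue it on the
 * target psock's ingress_skb list and kick that psock's backlog work.
 * The skb is dropped if no usable target psock exists.
 */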
903static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
904{
905 struct sk_psock *psock_other;
906 struct sock *sk_other;
907
908 sk_other = skb_bpf_redirect_fetch(skb);
909 /* This error indicates a buggy BPF program: it returned a redirect
910 * return code, but then didn't set a redirect socket.
911 */
912 if (unlikely(!sk_other)) {
913 skb_bpf_redirect_clear(skb);
914 sock_drop(from->sk, skb);
915 return -EIO;
916 }
917 psock_other = sk_psock(sk_other);
918 /* This error indicates the socket is being torn down or had another
919 * error that caused the pipe to break. We can't send a packet on
920 * a socket that is in this state so we drop the skb.
921 */
922 if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
923 skb_bpf_redirect_clear(skb);
924 sock_drop(from->sk, skb);
925 return -EIO;
926 }
927 spin_lock_bh(&psock_other->ingress_lock);
928 if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
929 spin_unlock_bh(&psock_other->ingress_lock);
930 skb_bpf_redirect_clear(skb);
931 sock_drop(from->sk, skb);
932 return -EIO;
933 }
934
935 skb_queue_tail(&psock_other->ingress_skb, skb);
936 schedule_delayed_work(&psock_other->work, 0);
937 spin_unlock_bh(&psock_other->ingress_lock);
938 return 0;
939}
940
941static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
942 struct sk_psock *from, int verdict)
943{
944 switch (verdict) {
945 case __SK_REDIRECT:
946 sk_psock_skb_redirect(from, skb);
947 break;
948 case __SK_PASS:
949 case __SK_DROP:
950 default:
951 break;
952 }
953}
954
955int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
956{
957 struct bpf_prog *prog;
958 int ret = __SK_PASS;
959
960 rcu_read_lock();
961 prog = READ_ONCE(psock->progs.stream_verdict);
962 if (likely(prog)) {
963 skb->sk = psock->sk;
964 skb_dst_drop(skb);
965 skb_bpf_redirect_clear(skb);
966 ret = bpf_prog_run_pin_on_cpu(prog, skb);
967 ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
968 skb->sk = NULL;
969 }
970 sk_psock_tls_verdict_apply(skb, psock, ret);
971 rcu_read_unlock();
972 return ret;
973}
974EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
975
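/* Apply a BPF verdict to @skb: __SK_PASS queues it for local ingress,
 * __SK_REDIRECT hands it to another socket, __SK_DROP (and any unknown
 * verdict) frees it.
 */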
976static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
977 int verdict)
978{
979 struct sock *sk_other;
980 int err = 0;
981 u32 len, off;
982
983 switch (verdict) {
984 case __SK_PASS:
985 err = -EIO;
986 sk_other = psock->sk;
987 if (sock_flag(sk_other, SOCK_DEAD) ||
988 !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
989 goto out_free;
990
991 skb_bpf_set_ingress(skb);
992
993 /* If the queue is empty then we can submit directly
994 * into the msg queue. If it's not empty we have to
995 * queue work, otherwise we may get OOO data.
996 * Any sk_psock_skb_ingress() errors will be handled by
997 * retrying later from the workqueue.
998 */
999 if (skb_queue_empty(&psock->ingress_skb)) {
1000 len = skb->len;
1001 off = 0;
1002 if (skb_bpf_strparser(skb)) {
1003 struct strp_msg *stm = strp_msg(skb);
1004
1005 off = stm->offset;
1006 len = stm->full_len;
1007 }
1008 err = sk_psock_skb_ingress_self(psock, skb, off, len);
1009 }
1010 if (err < 0) {
1011 spin_lock_bh(&psock->ingress_lock);
1012 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1013 skb_queue_tail(&psock->ingress_skb, skb);
1014 schedule_delayed_work(&psock->work, 0);
1015 err = 0;
1016 }
1017 spin_unlock_bh(&psock->ingress_lock);
1018 if (err < 0)
1019 goto out_free;
1020 }
1021 break;
1022 case __SK_REDIRECT:
1023 tcp_eat_skb(psock->sk, skb);
1024 err = sk_psock_skb_redirect(psock, skb);
1025 break;
1026 case __SK_DROP:
1027 default:
1028out_free:
1029 skb_bpf_redirect_clear(skb);
1030 tcp_eat_skb(psock->sk, skb);
1031 sock_drop(psock->sk, skb);
1032 }
1033
1034 return err;
1035}
1036
1037static void sk_psock_write_space(struct sock *sk)
1038{
1039 struct sk_psock *psock;
1040 void (*write_space)(struct sock *sk) = NULL;
1041
1042 rcu_read_lock();
1043 psock = sk_psock(sk);
1044 if (likely(psock)) {
1045 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1046 schedule_delayed_work(&psock->work, 0);
1047 write_space = psock->saved_write_space;
1048 }
1049 rcu_read_unlock();
1050 if (write_space)
1051 write_space(sk);
1052}
1053
1054#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1055static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1056{
1057 struct sk_psock *psock;
1058 struct bpf_prog *prog;
1059 int ret = __SK_DROP;
1060 struct sock *sk;
1061
1062 rcu_read_lock();
1063 sk = strp->sk;
1064 psock = sk_psock(sk);
1065 if (unlikely(!psock)) {
1066 sock_drop(sk, skb);
1067 goto out;
1068 }
1069 prog = READ_ONCE(psock->progs.stream_verdict);
1070 if (likely(prog)) {
1071 skb->sk = sk;
1072 skb_dst_drop(skb);
1073 skb_bpf_redirect_clear(skb);
1074 ret = bpf_prog_run_pin_on_cpu(prog, skb);
1075 skb_bpf_set_strparser(skb);
1076 ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1077 skb->sk = NULL;
1078 }
1079 sk_psock_verdict_apply(psock, skb, ret);
1080out:
1081 rcu_read_unlock();
1082}
1083
1084static int sk_psock_strp_read_done(struct strparser *strp, int err)
1085{
1086 return err;
1087}
1088
1089static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1090{
1091 struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1092 struct bpf_prog *prog;
1093 int ret = skb->len;
1094
1095 rcu_read_lock();
1096 prog = READ_ONCE(psock->progs.stream_parser);
1097 if (likely(prog)) {
1098 skb->sk = psock->sk;
1099 ret = bpf_prog_run_pin_on_cpu(prog, skb);
1100 skb->sk = NULL;
1101 }
1102 rcu_read_unlock();
1103 return ret;
1104}
1105
1106/* Called with socket lock held. */
1107static void sk_psock_strp_data_ready(struct sock *sk)
1108{
1109 struct sk_psock *psock;
1110
1111 trace_sk_data_ready(sk);
1112
1113 rcu_read_lock();
1114 psock = sk_psock(sk);
1115 if (likely(psock)) {
1116 if (tls_sw_has_ctx_rx(sk)) {
1117 psock->saved_data_ready(sk);
1118 } else {
1119 write_lock_bh(&sk->sk_callback_lock);
1120 strp_data_ready(&psock->strp);
1121 write_unlock_bh(&sk->sk_callback_lock);
1122 }
1123 }
1124 rcu_read_unlock();
1125}
1126
1127int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1128{
1129 int ret;
1130
1131 static const struct strp_callbacks cb = {
1132 .rcv_msg = sk_psock_strp_read,
1133 .read_sock_done = sk_psock_strp_read_done,
1134 .parse_msg = sk_psock_strp_parse,
1135 };
1136
1137 ret = strp_init(&psock->strp, sk, &cb);
1138 if (!ret)
1139 sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
1140
1141 return ret;
1142}
1143
1144void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1145{
1146 if (psock->saved_data_ready)
1147 return;
1148
1149 psock->saved_data_ready = sk->sk_data_ready;
1150 sk->sk_data_ready = sk_psock_strp_data_ready;
1151 sk->sk_write_space = sk_psock_write_space;
1152}
1153
1154void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1155{
1156 psock_set_prog(&psock->progs.stream_parser, NULL);
1157
1158 if (!psock->saved_data_ready)
1159 return;
1160
1161 sk->sk_data_ready = psock->saved_data_ready;
1162 psock->saved_data_ready = NULL;
1163 strp_stop(&psock->strp);
1164}
1165
1166static void sk_psock_done_strp(struct sk_psock *psock)
1167{
1168 /* Parser has been stopped */
1169 if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
1170 strp_done(&psock->strp);
1171}
1172#else
1173static void sk_psock_done_strp(struct sk_psock *psock)
1174{
1175}
1176#endif /* CONFIG_BPF_STREAM_PARSER */
1177
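/* read_skb() callback for the verdict-only (no strparser) path: run the
 * stream or skb verdict program on @skb and apply the result.
 */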
1178static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1179{
1180 struct sk_psock *psock;
1181 struct bpf_prog *prog;
1182 int ret = __SK_DROP;
1183 int len = skb->len;
1184
1185 rcu_read_lock();
1186 psock = sk_psock(sk);
1187 if (unlikely(!psock)) {
1188 len = 0;
1189 tcp_eat_skb(sk, skb);
1190 sock_drop(sk, skb);
1191 goto out;
1192 }
1193 prog = READ_ONCE(psock->progs.stream_verdict);
1194 if (!prog)
1195 prog = READ_ONCE(psock->progs.skb_verdict);
1196 if (likely(prog)) {
1197 skb_dst_drop(skb);
1198 skb_bpf_redirect_clear(skb);
1199 ret = bpf_prog_run_pin_on_cpu(prog, skb);
1200 ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1201 }
1202 ret = sk_psock_verdict_apply(psock, skb, ret);
1203 if (ret < 0)
1204 len = ret;
1205out:
1206 rcu_read_unlock();
1207 return len;
1208}
1209
1210static void sk_psock_verdict_data_ready(struct sock *sk)
1211{
1212 struct socket *sock = sk->sk_socket;
1213 const struct proto_ops *ops;
1214 int copied;
1215
1216 trace_sk_data_ready(sk);
1217
1218 if (unlikely(!sock))
1219 return;
1220 ops = READ_ONCE(sock->ops);
1221 if (!ops || !ops->read_skb)
1222 return;
1223 copied = ops->read_skb(sk, sk_psock_verdict_recv);
1224 if (copied >= 0) {
1225 struct sk_psock *psock;
1226
1227 rcu_read_lock();
1228 psock = sk_psock(sk);
1229 if (psock)
1230 sk_psock_data_ready(sk, psock);
1231 rcu_read_unlock();
1232 }
1233}
1234
1235void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1236{
1237 if (psock->saved_data_ready)
1238 return;
1239
1240 psock->saved_data_ready = sk->sk_data_ready;
1241 sk->sk_data_ready = sk_psock_verdict_data_ready;
1242 sk->sk_write_space = sk_psock_write_space;
1243}
1244
1245void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1246{
1247 psock_set_prog(&psock->progs.stream_verdict, NULL);
1248 psock_set_prog(&psock->progs.skb_verdict, NULL);
1249
1250 if (!psock->saved_data_ready)
1251 return;
1252
1253 sk->sk_data_ready = psock->saved_data_ready;
1254 psock->saved_data_ready = NULL;
1255}