net/ipv4/tcp_bpf.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
  3
  4#include <linux/skmsg.h>
  5#include <linux/filter.h>
  6#include <linux/bpf.h>
  7#include <linux/init.h>
  8#include <linux/wait.h>
  9#include <linux/util_macros.h>
 10
 11#include <net/inet_common.h>
 12#include <net/tls.h>
 13
 14void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
 15{
 16	struct tcp_sock *tcp;
 17	int copied;
 18
 19	if (!skb || !skb->len || !sk_is_tcp(sk))
 20		return;
 21
 22	if (skb_bpf_strparser(skb))
 23		return;
 24
 25	tcp = tcp_sk(sk);
 26	copied = tcp->copied_seq + skb->len;
 27	WRITE_ONCE(tcp->copied_seq, copied);
 28	tcp_rcv_space_adjust(sk);
 29	__tcp_cleanup_rbuf(sk, skb->len);
 30}
 31
 32static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
 33			   struct sk_msg *msg, u32 apply_bytes)
 34{
 35	bool apply = apply_bytes;
 36	struct scatterlist *sge;
 37	u32 size, copied = 0;
 38	struct sk_msg *tmp;
 39	int i, ret = 0;
 40
 41	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
 42	if (unlikely(!tmp))
 43		return -ENOMEM;
 44
 45	lock_sock(sk);
 46	tmp->sg.start = msg->sg.start;
 47	i = msg->sg.start;
 48	do {
 49		sge = sk_msg_elem(msg, i);
 50		size = (apply && apply_bytes < sge->length) ?
 51			apply_bytes : sge->length;
 52		if (!__sk_rmem_schedule(sk, size, false)) {
 53			if (!copied)
 54				ret = -ENOMEM;
 55			break;
 56		}
 57
 58		sk_mem_charge(sk, size);
 59		atomic_add(size, &sk->sk_rmem_alloc);
 60		sk_msg_xfer(tmp, msg, i, size);
 61		copied += size;
 62		if (sge->length)
 63			get_page(sk_msg_page(tmp, i));
 64		sk_msg_iter_var_next(i);
 65		tmp->sg.end = i;
 66		if (apply) {
 67			apply_bytes -= size;
 68			if (!apply_bytes) {
 69				if (sge->length)
 70					sk_msg_iter_var_prev(i);
 71				break;
 72			}
 73		}
 74	} while (i != msg->sg.end);
 75
 76	if (!ret) {
 77		msg->sg.start = i;
 78		if (!sk_psock_queue_msg(psock, tmp))
 79			atomic_sub(copied, &sk->sk_rmem_alloc);
 80		sk_psock_data_ready(sk, psock);
 81	} else {
 82		sk_msg_free(sk, tmp);
 83		kfree(tmp);
 84	}
 85
 86	release_sock(sk);
 87	return ret;
 88}
 89
 90static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
 91			int flags, bool uncharge)
 92{
 93	struct msghdr msghdr = {};
 94	bool apply = apply_bytes;
 95	struct scatterlist *sge;
 96	struct page *page;
 97	int size, ret = 0;
 98	u32 off;
 99
100	while (1) {
101		struct bio_vec bvec;
102		bool has_tx_ulp;
103
104		sge = sk_msg_elem(msg, msg->sg.start);
105		size = (apply && apply_bytes < sge->length) ?
106			apply_bytes : sge->length;
107		off  = sge->offset;
108		page = sg_page(sge);
109
110		tcp_rate_check_app_limited(sk);
111retry:
112		msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
113		has_tx_ulp = tls_sw_has_ctx_tx(sk);
114		if (has_tx_ulp)
115			msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;
116
117		if (size < sge->length && msg->sg.start != msg->sg.end)
118			msghdr.msg_flags |= MSG_MORE;
119
120		bvec_set_page(&bvec, page, size, off);
121		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
122		ret = tcp_sendmsg_locked(sk, &msghdr, size);
123		if (ret <= 0)
124			return ret;
125
126		if (apply)
127			apply_bytes -= ret;
128		msg->sg.size -= ret;
129		sge->offset += ret;
130		sge->length -= ret;
131		if (uncharge)
132			sk_mem_uncharge(sk, ret);
133		if (ret != size) {
134			size -= ret;
135			off  += ret;
136			goto retry;
137		}
138		if (!sge->length) {
139			put_page(page);
140			sk_msg_iter_next(msg, start);
141			sg_init_table(sge, 1);
142			if (msg->sg.start == msg->sg.end)
143				break;
144		}
145		if (apply && !apply_bytes)
146			break;
147	}
148
149	return 0;
150}
151
152static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
153			       u32 apply_bytes, int flags, bool uncharge)
154{
155	int ret;
156
157	lock_sock(sk);
158	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
159	release_sock(sk);
160	return ret;
161}
162
163int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
164			  struct sk_msg *msg, u32 bytes, int flags)
165{
166	struct sk_psock *psock = sk_psock_get(sk);
167	int ret;
168
169	if (unlikely(!psock))
170		return -EPIPE;
171
172	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes) :
173			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
174	sk_psock_put(sk, psock);
175	return ret;
176}
177EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
178
179#ifdef CONFIG_BPF_SYSCALL
180static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
181			     long timeo)
182{
183	DEFINE_WAIT_FUNC(wait, woken_wake_function);
184	int ret = 0;
185
186	if (sk->sk_shutdown & RCV_SHUTDOWN)
187		return 1;
188
189	if (!timeo)
190		return ret;
191
192	add_wait_queue(sk_sleep(sk), &wait);
193	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
194	ret = sk_wait_event(sk, &timeo,
195			    !list_empty(&psock->ingress_msg) ||
196			    !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);
197	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
198	remove_wait_queue(sk_sleep(sk), &wait);
199	return ret;
200}
201
202static bool is_next_msg_fin(struct sk_psock *psock)
203{
204	struct scatterlist *sge;
205	struct sk_msg *msg_rx;
206	int i;
207
208	msg_rx = sk_psock_peek_msg(psock);
209	i = msg_rx->sg.start;
210	sge = sk_msg_elem(msg_rx, i);
211	if (!sge->length) {
212		struct sk_buff *skb = msg_rx->skb;
213
214		if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
215			return true;
216	}
217	return false;
218}
219
220static int tcp_bpf_recvmsg_parser(struct sock *sk,
221				  struct msghdr *msg,
222				  size_t len,
223				  int flags,
224				  int *addr_len)
225{
226	int peek = flags & MSG_PEEK;
227	struct sk_psock *psock;
228	struct tcp_sock *tcp;
229	int copied = 0;
230	u32 seq;
231
232	if (unlikely(flags & MSG_ERRQUEUE))
233		return inet_recv_error(sk, msg, len, addr_len);
234
235	if (!len)
236		return 0;
237
238	psock = sk_psock_get(sk);
239	if (unlikely(!psock))
240		return tcp_recvmsg(sk, msg, len, flags, addr_len);
241
242	lock_sock(sk);
243	tcp = tcp_sk(sk);
244	seq = tcp->copied_seq;
245	/* We may have received data on the sk_receive_queue pre-accept and
246	 * then we can not use read_skb in this context because we haven't
247	 * assigned a sk_socket yet so have no link to the ops. The work-around
248	 * is to check the sk_receive_queue and in these cases read skbs off
249	 * queue again. The read_skb hook is not running at this point because
250	 * of lock_sock so we avoid having multiple runners in read_skb.
251	 */
252	if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
253		tcp_data_ready(sk);
254		/* This handles the ENOMEM errors if we both receive data
255		 * pre accept and are already under memory pressure. At least
256		 * let user know to retry.
257		 */
258		if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
259			copied = -EAGAIN;
260			goto out;
261		}
262	}
263
264msg_bytes_ready:
265	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
 266	/* The typical case for EFAULT is that the socket was gracefully
 267	 * shut down with a FIN pkt. Check for that here; the other case is
 268	 * some error on copy_page_to_iter, which would be unexpected.
 269	 * On FIN, correct the return code to zero.
 270	 */
271	if (copied == -EFAULT) {
272		bool is_fin = is_next_msg_fin(psock);
273
274		if (is_fin) {
275			copied = 0;
276			seq++;
277			goto out;
278		}
279	}
280	seq += copied;
281	if (!copied) {
282		long timeo;
283		int data;
284
285		if (sock_flag(sk, SOCK_DONE))
286			goto out;
287
288		if (sk->sk_err) {
289			copied = sock_error(sk);
290			goto out;
291		}
292
293		if (sk->sk_shutdown & RCV_SHUTDOWN)
294			goto out;
295
296		if (sk->sk_state == TCP_CLOSE) {
297			copied = -ENOTCONN;
298			goto out;
299		}
300
301		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
302		if (!timeo) {
303			copied = -EAGAIN;
304			goto out;
305		}
306
307		if (signal_pending(current)) {
308			copied = sock_intr_errno(timeo);
309			goto out;
310		}
311
312		data = tcp_msg_wait_data(sk, psock, timeo);
313		if (data < 0) {
314			copied = data;
315			goto unlock;
316		}
317		if (data && !sk_psock_queue_empty(psock))
318			goto msg_bytes_ready;
319		copied = -EAGAIN;
320	}
321out:
322	if (!peek)
323		WRITE_ONCE(tcp->copied_seq, seq);
324	tcp_rcv_space_adjust(sk);
325	if (copied > 0)
326		__tcp_cleanup_rbuf(sk, copied);
327
328unlock:
329	release_sock(sk);
330	sk_psock_put(sk, psock);
331	return copied;
332}
333
334static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
335			   int flags, int *addr_len)
336{
337	struct sk_psock *psock;
338	int copied, ret;
339
340	if (unlikely(flags & MSG_ERRQUEUE))
341		return inet_recv_error(sk, msg, len, addr_len);
342
343	if (!len)
344		return 0;
345
346	psock = sk_psock_get(sk);
347	if (unlikely(!psock))
348		return tcp_recvmsg(sk, msg, len, flags, addr_len);
349	if (!skb_queue_empty(&sk->sk_receive_queue) &&
350	    sk_psock_queue_empty(psock)) {
351		sk_psock_put(sk, psock);
352		return tcp_recvmsg(sk, msg, len, flags, addr_len);
353	}
354	lock_sock(sk);
355msg_bytes_ready:
356	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
357	if (!copied) {
358		long timeo;
359		int data;
360
361		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
362		data = tcp_msg_wait_data(sk, psock, timeo);
363		if (data < 0) {
364			ret = data;
365			goto unlock;
366		}
367		if (data) {
368			if (!sk_psock_queue_empty(psock))
369				goto msg_bytes_ready;
370			release_sock(sk);
371			sk_psock_put(sk, psock);
372			return tcp_recvmsg(sk, msg, len, flags, addr_len);
373		}
374		copied = -EAGAIN;
375	}
376	ret = copied;
377
378unlock:
379	release_sock(sk);
380	sk_psock_put(sk, psock);
381	return ret;
382}
383
384static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
385				struct sk_msg *msg, int *copied, int flags)
386{
387	bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
388	struct sock *sk_redir;
389	u32 tosend, origsize, sent, delta = 0;
390	u32 eval;
391	int ret;
392
393more_data:
394	if (psock->eval == __SK_NONE) {
395		/* Track delta in msg size to add/subtract it on SK_DROP from
396		 * returned to user copied size. This ensures user doesn't
397		 * get a positive return code with msg_cut_data and SK_DROP
398		 * verdict.
399		 */
400		delta = msg->sg.size;
401		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
402		delta -= msg->sg.size;
403	}
404
405	if (msg->cork_bytes &&
406	    msg->cork_bytes > msg->sg.size && !enospc) {
407		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
408		if (!psock->cork) {
409			psock->cork = kzalloc(sizeof(*psock->cork),
410					      GFP_ATOMIC | __GFP_NOWARN);
411			if (!psock->cork)
412				return -ENOMEM;
413		}
414		memcpy(psock->cork, msg, sizeof(*msg));
415		return 0;
416	}
417
418	tosend = msg->sg.size;
419	if (psock->apply_bytes && psock->apply_bytes < tosend)
420		tosend = psock->apply_bytes;
421	eval = __SK_NONE;
422
423	switch (psock->eval) {
424	case __SK_PASS:
425		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
426		if (unlikely(ret)) {
427			*copied -= sk_msg_free(sk, msg);
428			break;
429		}
430		sk_msg_apply_bytes(psock, tosend);
431		break;
432	case __SK_REDIRECT:
433		redir_ingress = psock->redir_ingress;
434		sk_redir = psock->sk_redir;
435		sk_msg_apply_bytes(psock, tosend);
436		if (!psock->apply_bytes) {
437			/* Clean up before releasing the sock lock. */
438			eval = psock->eval;
439			psock->eval = __SK_NONE;
440			psock->sk_redir = NULL;
441		}
442		if (psock->cork) {
443			cork = true;
444			psock->cork = NULL;
445		}
446		release_sock(sk);
447
448		origsize = msg->sg.size;
449		ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
450					    msg, tosend, flags);
451		sent = origsize - msg->sg.size;
452
453		if (eval == __SK_REDIRECT)
454			sock_put(sk_redir);
455
456		lock_sock(sk);
457		sk_mem_uncharge(sk, sent);
458		if (unlikely(ret < 0)) {
459			int free = sk_msg_free(sk, msg);
460
461			if (!cork)
462				*copied -= free;
463		}
464		if (cork) {
465			sk_msg_free(sk, msg);
466			kfree(msg);
467			msg = NULL;
468			ret = 0;
469		}
470		break;
471	case __SK_DROP:
472	default:
473		sk_msg_free(sk, msg);
474		sk_msg_apply_bytes(psock, tosend);
475		*copied -= (tosend + delta);
476		return -EACCES;
477	}
478
479	if (likely(!ret)) {
480		if (!psock->apply_bytes) {
481			psock->eval =  __SK_NONE;
482			if (psock->sk_redir) {
483				sock_put(psock->sk_redir);
484				psock->sk_redir = NULL;
485			}
486		}
487		if (msg &&
488		    msg->sg.data[msg->sg.start].page_link &&
489		    msg->sg.data[msg->sg.start].length)
490			goto more_data;
491	}
492	return ret;
493}
494
495static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
496{
497	struct sk_msg tmp, *msg_tx = NULL;
498	int copied = 0, err = 0, ret = 0;
499	struct sk_psock *psock;
500	long timeo;
501	int flags;
502
503	/* Don't let internal flags through */
504	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
505	flags |= MSG_NO_SHARED_FRAGS;
506
507	psock = sk_psock_get(sk);
508	if (unlikely(!psock))
509		return tcp_sendmsg(sk, msg, size);
510
511	lock_sock(sk);
512	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
513	while (msg_data_left(msg)) {
514		bool enospc = false;
515		u32 copy, osize;
516
517		if (sk->sk_err) {
518			err = -sk->sk_err;
519			goto out_err;
520		}
521
522		copy = msg_data_left(msg);
523		if (!sk_stream_memory_free(sk))
524			goto wait_for_sndbuf;
525		if (psock->cork) {
526			msg_tx = psock->cork;
527		} else {
528			msg_tx = &tmp;
529			sk_msg_init(msg_tx);
530		}
531
532		osize = msg_tx->sg.size;
533		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
534		if (err) {
535			if (err != -ENOSPC)
536				goto wait_for_memory;
537			enospc = true;
538			copy = msg_tx->sg.size - osize;
539		}
540
541		ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
542					       copy);
543		if (ret < 0) {
544			sk_msg_trim(sk, msg_tx, osize);
545			goto out_err;
546		}
547
548		copied += ret;
549		if (psock->cork_bytes) {
550			if (size > psock->cork_bytes)
551				psock->cork_bytes = 0;
552			else
553				psock->cork_bytes -= size;
554			if (psock->cork_bytes && !enospc)
555				goto out_err;
556			/* All cork bytes are accounted, rerun the prog. */
557			psock->eval = __SK_NONE;
558			psock->cork_bytes = 0;
559		}
560
561		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
562		if (unlikely(err < 0))
563			goto out_err;
564		continue;
565wait_for_sndbuf:
566		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
567wait_for_memory:
568		err = sk_stream_wait_memory(sk, &timeo);
569		if (err) {
570			if (msg_tx && msg_tx != psock->cork)
571				sk_msg_free(sk, msg_tx);
572			goto out_err;
573		}
574	}
575out_err:
576	if (err < 0)
577		err = sk_stream_error(sk, msg->msg_flags, err);
578	release_sock(sk);
579	sk_psock_put(sk, psock);
580	return copied > 0 ? copied : err;
581}
582
583enum {
584	TCP_BPF_IPV4,
585	TCP_BPF_IPV6,
586	TCP_BPF_NUM_PROTS,
587};
588
589enum {
590	TCP_BPF_BASE,
591	TCP_BPF_TX,
592	TCP_BPF_RX,
593	TCP_BPF_TXRX,
594	TCP_BPF_NUM_CFGS,
595};
596
597static struct proto *tcpv6_prot_saved __read_mostly;
598static DEFINE_SPINLOCK(tcpv6_prot_lock);
599static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];
600
601static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
602				   struct proto *base)
603{
604	prot[TCP_BPF_BASE]			= *base;
605	prot[TCP_BPF_BASE].destroy		= sock_map_destroy;
606	prot[TCP_BPF_BASE].close		= sock_map_close;
607	prot[TCP_BPF_BASE].recvmsg		= tcp_bpf_recvmsg;
608	prot[TCP_BPF_BASE].sock_is_readable	= sk_msg_is_readable;
609
610	prot[TCP_BPF_TX]			= prot[TCP_BPF_BASE];
611	prot[TCP_BPF_TX].sendmsg		= tcp_bpf_sendmsg;
612
613	prot[TCP_BPF_RX]			= prot[TCP_BPF_BASE];
614	prot[TCP_BPF_RX].recvmsg		= tcp_bpf_recvmsg_parser;
615
616	prot[TCP_BPF_TXRX]			= prot[TCP_BPF_TX];
617	prot[TCP_BPF_TXRX].recvmsg		= tcp_bpf_recvmsg_parser;
618}
619
620static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
621{
622	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
623		spin_lock_bh(&tcpv6_prot_lock);
624		if (likely(ops != tcpv6_prot_saved)) {
625			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
626			smp_store_release(&tcpv6_prot_saved, ops);
627		}
628		spin_unlock_bh(&tcpv6_prot_lock);
629	}
630}
631
632static int __init tcp_bpf_v4_build_proto(void)
633{
634	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
635	return 0;
636}
637late_initcall(tcp_bpf_v4_build_proto);
638
639static int tcp_bpf_assert_proto_ops(struct proto *ops)
640{
641	/* In order to avoid retpoline, we make assumptions when we call
642	 * into ops if e.g. a psock is not present. Make sure they are
643	 * indeed valid assumptions.
644	 */
645	return ops->recvmsg  == tcp_recvmsg &&
646	       ops->sendmsg  == tcp_sendmsg ? 0 : -ENOTSUPP;
647}
648
649int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
650{
651	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
652	int config = psock->progs.msg_parser   ? TCP_BPF_TX   : TCP_BPF_BASE;
653
654	if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
655		config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX;
656	}
657
658	if (restore) {
659		if (inet_csk_has_ulp(sk)) {
660			/* TLS does not have an unhash proto in SW cases,
661			 * but we need to ensure we stop using the sock_map
662			 * unhash routine because the associated psock is being
663			 * removed. So use the original unhash handler.
664			 */
665			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
666			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
667		} else {
668			sk->sk_write_space = psock->saved_write_space;
669			/* Pairs with lockless read in sk_clone_lock() */
670			sock_replace_proto(sk, psock->sk_proto);
671		}
672		return 0;
673	}
674
675	if (sk->sk_family == AF_INET6) {
676		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
677			return -EINVAL;
678
679		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
680	}
681
682	/* Pairs with lockless read in sk_clone_lock() */
683	sock_replace_proto(sk, &tcp_bpf_prots[family][config]);
684	return 0;
685}
686EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);
687
688/* If a child got cloned from a listening socket that had tcp_bpf
689 * protocol callbacks installed, we need to restore the callbacks to
690 * the default ones because the child does not inherit the psock state
691 * that tcp_bpf callbacks expect.
692 */
693void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
694{
695	struct proto *prot = newsk->sk_prot;
696
697	if (is_insidevar(prot, tcp_bpf_prots))
698		newsk->sk_prot = sk->sk_prot_creator;
699}
700#endif /* CONFIG_BPF_SYSCALL */
net/ipv4/tcp_bpf.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
  3
  4#include <linux/skmsg.h>
  5#include <linux/filter.h>
  6#include <linux/bpf.h>
  7#include <linux/init.h>
  8#include <linux/wait.h>
  9#include <linux/util_macros.h>
 10
 11#include <net/inet_common.h>
 12#include <net/tls.h>
 13
 14void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
 15{
 16	struct tcp_sock *tcp;
 17	int copied;
 18
 19	if (!skb || !skb->len || !sk_is_tcp(sk))
 20		return;
 21
 22	if (skb_bpf_strparser(skb))
 23		return;
 24
 25	tcp = tcp_sk(sk);
 26	copied = tcp->copied_seq + skb->len;
 27	WRITE_ONCE(tcp->copied_seq, copied);
 28	tcp_rcv_space_adjust(sk);
 29	__tcp_cleanup_rbuf(sk, skb->len);
 30}
 31
 32static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
 33			   struct sk_msg *msg, u32 apply_bytes, int flags)
 34{
 35	bool apply = apply_bytes;
 36	struct scatterlist *sge;
 37	u32 size, copied = 0;
 38	struct sk_msg *tmp;
 39	int i, ret = 0;
 40
 41	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
 42	if (unlikely(!tmp))
 43		return -ENOMEM;
 44
 45	lock_sock(sk);
 46	tmp->sg.start = msg->sg.start;
 47	i = msg->sg.start;
 48	do {
 49		sge = sk_msg_elem(msg, i);
 50		size = (apply && apply_bytes < sge->length) ?
 51			apply_bytes : sge->length;
 52		if (!sk_wmem_schedule(sk, size)) {
 53			if (!copied)
 54				ret = -ENOMEM;
 55			break;
 56		}
 57
 58		sk_mem_charge(sk, size);
 59		sk_msg_xfer(tmp, msg, i, size);
 60		copied += size;
 61		if (sge->length)
 62			get_page(sk_msg_page(tmp, i));
 63		sk_msg_iter_var_next(i);
 64		tmp->sg.end = i;
 65		if (apply) {
 66			apply_bytes -= size;
 67			if (!apply_bytes) {
 68				if (sge->length)
 69					sk_msg_iter_var_prev(i);
 70				break;
 71			}
 72		}
 73	} while (i != msg->sg.end);
 74
 75	if (!ret) {
 76		msg->sg.start = i;
 77		sk_psock_queue_msg(psock, tmp);
 78		sk_psock_data_ready(sk, psock);
 79	} else {
 80		sk_msg_free(sk, tmp);
 81		kfree(tmp);
 82	}
 83
 84	release_sock(sk);
 85	return ret;
 86}
 87
 88static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
 89			int flags, bool uncharge)
 90{
 91	struct msghdr msghdr = {};
 92	bool apply = apply_bytes;
 93	struct scatterlist *sge;
 94	struct page *page;
 95	int size, ret = 0;
 96	u32 off;
 97
 98	while (1) {
 99		struct bio_vec bvec;
100		bool has_tx_ulp;
101
102		sge = sk_msg_elem(msg, msg->sg.start);
103		size = (apply && apply_bytes < sge->length) ?
104			apply_bytes : sge->length;
105		off  = sge->offset;
106		page = sg_page(sge);
107
108		tcp_rate_check_app_limited(sk);
109retry:
110		msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
111		has_tx_ulp = tls_sw_has_ctx_tx(sk);
112		if (has_tx_ulp)
113			msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;
114
115		if (size < sge->length && msg->sg.start != msg->sg.end)
116			msghdr.msg_flags |= MSG_MORE;
117
118		bvec_set_page(&bvec, page, size, off);
119		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
120		ret = tcp_sendmsg_locked(sk, &msghdr, size);
121		if (ret <= 0)
122			return ret;
123
124		if (apply)
125			apply_bytes -= ret;
126		msg->sg.size -= ret;
127		sge->offset += ret;
128		sge->length -= ret;
129		if (uncharge)
130			sk_mem_uncharge(sk, ret);
131		if (ret != size) {
132			size -= ret;
133			off  += ret;
134			goto retry;
135		}
136		if (!sge->length) {
137			put_page(page);
138			sk_msg_iter_next(msg, start);
139			sg_init_table(sge, 1);
140			if (msg->sg.start == msg->sg.end)
141				break;
142		}
143		if (apply && !apply_bytes)
144			break;
145	}
146
147	return 0;
148}
149
150static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
151			       u32 apply_bytes, int flags, bool uncharge)
152{
153	int ret;
154
155	lock_sock(sk);
156	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
157	release_sock(sk);
158	return ret;
159}
160
161int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
162			  struct sk_msg *msg, u32 bytes, int flags)
163{
164	struct sk_psock *psock = sk_psock_get(sk);
165	int ret;
166
167	if (unlikely(!psock))
168		return -EPIPE;
169
170	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
171			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
172	sk_psock_put(sk, psock);
173	return ret;
174}
175EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
176
177#ifdef CONFIG_BPF_SYSCALL
178static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
179			     long timeo)
180{
181	DEFINE_WAIT_FUNC(wait, woken_wake_function);
182	int ret = 0;
183
184	if (sk->sk_shutdown & RCV_SHUTDOWN)
185		return 1;
186
187	if (!timeo)
188		return ret;
189
190	add_wait_queue(sk_sleep(sk), &wait);
191	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
192	ret = sk_wait_event(sk, &timeo,
193			    !list_empty(&psock->ingress_msg) ||
194			    !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);
195	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
196	remove_wait_queue(sk_sleep(sk), &wait);
197	return ret;
198}
199
200static bool is_next_msg_fin(struct sk_psock *psock)
201{
202	struct scatterlist *sge;
203	struct sk_msg *msg_rx;
204	int i;
205
206	msg_rx = sk_psock_peek_msg(psock);
207	i = msg_rx->sg.start;
208	sge = sk_msg_elem(msg_rx, i);
209	if (!sge->length) {
210		struct sk_buff *skb = msg_rx->skb;
211
212		if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
213			return true;
214	}
215	return false;
216}
217
218static int tcp_bpf_recvmsg_parser(struct sock *sk,
219				  struct msghdr *msg,
220				  size_t len,
221				  int flags,
222				  int *addr_len)
223{
224	struct tcp_sock *tcp = tcp_sk(sk);
225	int peek = flags & MSG_PEEK;
226	u32 seq = tcp->copied_seq;
227	struct sk_psock *psock;
228	int copied = 0;
229
230	if (unlikely(flags & MSG_ERRQUEUE))
231		return inet_recv_error(sk, msg, len, addr_len);
232
233	if (!len)
234		return 0;
235
236	psock = sk_psock_get(sk);
237	if (unlikely(!psock))
238		return tcp_recvmsg(sk, msg, len, flags, addr_len);
239
240	lock_sock(sk);
241
242	/* We may have received data on the sk_receive_queue pre-accept and
243	 * then we can not use read_skb in this context because we haven't
244	 * assigned a sk_socket yet so have no link to the ops. The work-around
245	 * is to check the sk_receive_queue and in these cases read skbs off
246	 * queue again. The read_skb hook is not running at this point because
247	 * of lock_sock so we avoid having multiple runners in read_skb.
248	 */
249	if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
250		tcp_data_ready(sk);
251		/* This handles the ENOMEM errors if we both receive data
252		 * pre accept and are already under memory pressure. At least
253		 * let user know to retry.
254		 */
255		if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
256			copied = -EAGAIN;
257			goto out;
258		}
259	}
260
261msg_bytes_ready:
262	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
 263	/* The typical case for EFAULT is that the socket was gracefully
 264	 * shut down with a FIN pkt. Check for that here; the other case is
 265	 * some error on copy_page_to_iter, which would be unexpected.
 266	 * On FIN, correct the return code to zero.
 267	 */
268	if (copied == -EFAULT) {
269		bool is_fin = is_next_msg_fin(psock);
270
271		if (is_fin) {
272			copied = 0;
273			seq++;
274			goto out;
275		}
276	}
277	seq += copied;
278	if (!copied) {
279		long timeo;
280		int data;
281
282		if (sock_flag(sk, SOCK_DONE))
283			goto out;
284
285		if (sk->sk_err) {
286			copied = sock_error(sk);
287			goto out;
288		}
289
290		if (sk->sk_shutdown & RCV_SHUTDOWN)
291			goto out;
292
293		if (sk->sk_state == TCP_CLOSE) {
294			copied = -ENOTCONN;
295			goto out;
296		}
297
298		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
299		if (!timeo) {
300			copied = -EAGAIN;
301			goto out;
302		}
303
304		if (signal_pending(current)) {
305			copied = sock_intr_errno(timeo);
306			goto out;
307		}
308
309		data = tcp_msg_wait_data(sk, psock, timeo);
310		if (data < 0) {
311			copied = data;
312			goto unlock;
313		}
314		if (data && !sk_psock_queue_empty(psock))
315			goto msg_bytes_ready;
316		copied = -EAGAIN;
317	}
318out:
319	if (!peek)
320		WRITE_ONCE(tcp->copied_seq, seq);
321	tcp_rcv_space_adjust(sk);
322	if (copied > 0)
323		__tcp_cleanup_rbuf(sk, copied);
324
325unlock:
326	release_sock(sk);
327	sk_psock_put(sk, psock);
328	return copied;
329}
330
331static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
332			   int flags, int *addr_len)
333{
334	struct sk_psock *psock;
335	int copied, ret;
336
337	if (unlikely(flags & MSG_ERRQUEUE))
338		return inet_recv_error(sk, msg, len, addr_len);
339
340	if (!len)
341		return 0;
342
343	psock = sk_psock_get(sk);
344	if (unlikely(!psock))
345		return tcp_recvmsg(sk, msg, len, flags, addr_len);
346	if (!skb_queue_empty(&sk->sk_receive_queue) &&
347	    sk_psock_queue_empty(psock)) {
348		sk_psock_put(sk, psock);
349		return tcp_recvmsg(sk, msg, len, flags, addr_len);
350	}
351	lock_sock(sk);
352msg_bytes_ready:
353	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
354	if (!copied) {
355		long timeo;
356		int data;
357
358		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
359		data = tcp_msg_wait_data(sk, psock, timeo);
360		if (data < 0) {
361			ret = data;
362			goto unlock;
363		}
364		if (data) {
365			if (!sk_psock_queue_empty(psock))
366				goto msg_bytes_ready;
367			release_sock(sk);
368			sk_psock_put(sk, psock);
369			return tcp_recvmsg(sk, msg, len, flags, addr_len);
370		}
371		copied = -EAGAIN;
372	}
373	ret = copied;
374
375unlock:
376	release_sock(sk);
377	sk_psock_put(sk, psock);
378	return ret;
379}
380
381static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
382				struct sk_msg *msg, int *copied, int flags)
383{
384	bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
385	struct sock *sk_redir;
386	u32 tosend, origsize, sent, delta = 0;
387	u32 eval;
388	int ret;
389
390more_data:
391	if (psock->eval == __SK_NONE) {
392		/* Track delta in msg size to add/subtract it on SK_DROP from
393		 * returned to user copied size. This ensures user doesn't
394		 * get a positive return code with msg_cut_data and SK_DROP
395		 * verdict.
396		 */
397		delta = msg->sg.size;
398		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
399		delta -= msg->sg.size;
400	}
401
402	if (msg->cork_bytes &&
403	    msg->cork_bytes > msg->sg.size && !enospc) {
404		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
405		if (!psock->cork) {
406			psock->cork = kzalloc(sizeof(*psock->cork),
407					      GFP_ATOMIC | __GFP_NOWARN);
408			if (!psock->cork)
409				return -ENOMEM;
410		}
411		memcpy(psock->cork, msg, sizeof(*msg));
412		return 0;
413	}
414
415	tosend = msg->sg.size;
416	if (psock->apply_bytes && psock->apply_bytes < tosend)
417		tosend = psock->apply_bytes;
418	eval = __SK_NONE;
419
420	switch (psock->eval) {
421	case __SK_PASS:
422		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
423		if (unlikely(ret)) {
424			*copied -= sk_msg_free(sk, msg);
425			break;
426		}
427		sk_msg_apply_bytes(psock, tosend);
428		break;
429	case __SK_REDIRECT:
430		redir_ingress = psock->redir_ingress;
431		sk_redir = psock->sk_redir;
432		sk_msg_apply_bytes(psock, tosend);
433		if (!psock->apply_bytes) {
434			/* Clean up before releasing the sock lock. */
435			eval = psock->eval;
436			psock->eval = __SK_NONE;
437			psock->sk_redir = NULL;
438		}
439		if (psock->cork) {
440			cork = true;
441			psock->cork = NULL;
442		}
443		sk_msg_return(sk, msg, tosend);
444		release_sock(sk);
445
446		origsize = msg->sg.size;
447		ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
448					    msg, tosend, flags);
449		sent = origsize - msg->sg.size;
450
451		if (eval == __SK_REDIRECT)
452			sock_put(sk_redir);
453
454		lock_sock(sk);
455		if (unlikely(ret < 0)) {
456			int free = sk_msg_free_nocharge(sk, msg);
457
458			if (!cork)
459				*copied -= free;
460		}
461		if (cork) {
462			sk_msg_free(sk, msg);
463			kfree(msg);
464			msg = NULL;
465			ret = 0;
466		}
467		break;
468	case __SK_DROP:
469	default:
470		sk_msg_free_partial(sk, msg, tosend);
471		sk_msg_apply_bytes(psock, tosend);
472		*copied -= (tosend + delta);
473		return -EACCES;
474	}
475
476	if (likely(!ret)) {
477		if (!psock->apply_bytes) {
478			psock->eval =  __SK_NONE;
479			if (psock->sk_redir) {
480				sock_put(psock->sk_redir);
481				psock->sk_redir = NULL;
482			}
483		}
484		if (msg &&
485		    msg->sg.data[msg->sg.start].page_link &&
486		    msg->sg.data[msg->sg.start].length) {
487			if (eval == __SK_REDIRECT)
488				sk_mem_charge(sk, tosend - sent);
489			goto more_data;
490		}
491	}
492	return ret;
493}
494
495static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
496{
497	struct sk_msg tmp, *msg_tx = NULL;
498	int copied = 0, err = 0;
499	struct sk_psock *psock;
500	long timeo;
501	int flags;
502
503	/* Don't let internal flags through */
504	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
505	flags |= MSG_NO_SHARED_FRAGS;
506
507	psock = sk_psock_get(sk);
508	if (unlikely(!psock))
509		return tcp_sendmsg(sk, msg, size);
510
511	lock_sock(sk);
512	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
513	while (msg_data_left(msg)) {
514		bool enospc = false;
515		u32 copy, osize;
516
517		if (sk->sk_err) {
518			err = -sk->sk_err;
519			goto out_err;
520		}
521
522		copy = msg_data_left(msg);
523		if (!sk_stream_memory_free(sk))
524			goto wait_for_sndbuf;
525		if (psock->cork) {
526			msg_tx = psock->cork;
527		} else {
528			msg_tx = &tmp;
529			sk_msg_init(msg_tx);
530		}
531
532		osize = msg_tx->sg.size;
533		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
534		if (err) {
535			if (err != -ENOSPC)
536				goto wait_for_memory;
537			enospc = true;
538			copy = msg_tx->sg.size - osize;
539		}
540
541		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
542					       copy);
543		if (err < 0) {
544			sk_msg_trim(sk, msg_tx, osize);
545			goto out_err;
546		}
547
548		copied += copy;
549		if (psock->cork_bytes) {
550			if (size > psock->cork_bytes)
551				psock->cork_bytes = 0;
552			else
553				psock->cork_bytes -= size;
554			if (psock->cork_bytes && !enospc)
555				goto out_err;
556			/* All cork bytes are accounted, rerun the prog. */
557			psock->eval = __SK_NONE;
558			psock->cork_bytes = 0;
559		}
560
561		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
562		if (unlikely(err < 0))
563			goto out_err;
564		continue;
565wait_for_sndbuf:
566		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
567wait_for_memory:
568		err = sk_stream_wait_memory(sk, &timeo);
569		if (err) {
570			if (msg_tx && msg_tx != psock->cork)
571				sk_msg_free(sk, msg_tx);
572			goto out_err;
573		}
574	}
575out_err:
576	if (err < 0)
577		err = sk_stream_error(sk, msg->msg_flags, err);
578	release_sock(sk);
579	sk_psock_put(sk, psock);
580	return copied ? copied : err;
581}
582
583enum {
584	TCP_BPF_IPV4,
585	TCP_BPF_IPV6,
586	TCP_BPF_NUM_PROTS,
587};
588
589enum {
590	TCP_BPF_BASE,
591	TCP_BPF_TX,
592	TCP_BPF_RX,
593	TCP_BPF_TXRX,
594	TCP_BPF_NUM_CFGS,
595};
596
597static struct proto *tcpv6_prot_saved __read_mostly;
598static DEFINE_SPINLOCK(tcpv6_prot_lock);
599static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];
600
601static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
602				   struct proto *base)
603{
604	prot[TCP_BPF_BASE]			= *base;
605	prot[TCP_BPF_BASE].destroy		= sock_map_destroy;
606	prot[TCP_BPF_BASE].close		= sock_map_close;
607	prot[TCP_BPF_BASE].recvmsg		= tcp_bpf_recvmsg;
608	prot[TCP_BPF_BASE].sock_is_readable	= sk_msg_is_readable;
609
610	prot[TCP_BPF_TX]			= prot[TCP_BPF_BASE];
611	prot[TCP_BPF_TX].sendmsg		= tcp_bpf_sendmsg;
612
613	prot[TCP_BPF_RX]			= prot[TCP_BPF_BASE];
614	prot[TCP_BPF_RX].recvmsg		= tcp_bpf_recvmsg_parser;
615
616	prot[TCP_BPF_TXRX]			= prot[TCP_BPF_TX];
617	prot[TCP_BPF_TXRX].recvmsg		= tcp_bpf_recvmsg_parser;
618}
619
620static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
621{
622	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
623		spin_lock_bh(&tcpv6_prot_lock);
624		if (likely(ops != tcpv6_prot_saved)) {
625			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
626			smp_store_release(&tcpv6_prot_saved, ops);
627		}
628		spin_unlock_bh(&tcpv6_prot_lock);
629	}
630}
631
632static int __init tcp_bpf_v4_build_proto(void)
633{
634	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
635	return 0;
636}
637late_initcall(tcp_bpf_v4_build_proto);
638
639static int tcp_bpf_assert_proto_ops(struct proto *ops)
640{
641	/* In order to avoid retpoline, we make assumptions when we call
642	 * into ops if e.g. a psock is not present. Make sure they are
643	 * indeed valid assumptions.
644	 */
645	return ops->recvmsg  == tcp_recvmsg &&
646	       ops->sendmsg  == tcp_sendmsg ? 0 : -ENOTSUPP;
647}
648
649int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
650{
651	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
652	int config = psock->progs.msg_parser   ? TCP_BPF_TX   : TCP_BPF_BASE;
653
654	if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
655		config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX;
656	}
657
658	if (restore) {
659		if (inet_csk_has_ulp(sk)) {
660			/* TLS does not have an unhash proto in SW cases,
661			 * but we need to ensure we stop using the sock_map
662			 * unhash routine because the associated psock is being
663			 * removed. So use the original unhash handler.
664			 */
665			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
666			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
667		} else {
668			sk->sk_write_space = psock->saved_write_space;
669			/* Pairs with lockless read in sk_clone_lock() */
670			sock_replace_proto(sk, psock->sk_proto);
671		}
672		return 0;
673	}
674
675	if (sk->sk_family == AF_INET6) {
676		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
677			return -EINVAL;
678
679		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
680	}
681
682	/* Pairs with lockless read in sk_clone_lock() */
683	sock_replace_proto(sk, &tcp_bpf_prots[family][config]);
684	return 0;
685}
686EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);
687
688/* If a child got cloned from a listening socket that had tcp_bpf
689 * protocol callbacks installed, we need to restore the callbacks to
690 * the default ones because the child does not inherit the psock state
691 * that tcp_bpf callbacks expect.
692 */
693void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
694{
695	struct proto *prot = newsk->sk_prot;
696
697	if (is_insidevar(prot, tcp_bpf_prots))
698		newsk->sk_prot = sk->sk_prot_creator;
699}
700#endif /* CONFIG_BPF_SYSCALL */