v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
  3 */
  4
  5#include <linux/filter.h>
  6#include <linux/kernel.h>
  7#include <linux/module.h>
  8#include <linux/skbuff.h>
  9#include <linux/types.h>
 10#include <linux/bpf.h>
 11#include <net/lwtunnel.h>
 12#include <net/gre.h>
 13#include <net/ip.h>
 14#include <net/ip6_route.h>
 15#include <net/ipv6_stubs.h>
 16#include <net/inet_dscp.h>
 17
 18struct bpf_lwt_prog {
 19	struct bpf_prog *prog;
 20	char *name;
 21};
 22
 23struct bpf_lwt {
 24	struct bpf_lwt_prog in;
 25	struct bpf_lwt_prog out;
 26	struct bpf_lwt_prog xmit;
 27	int family;
 28};
 29
 30#define MAX_PROG_NAME 256
 31
 32static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
 33{
 34	return (struct bpf_lwt *)lwt->data;
 35}
 36
 37#define NO_REDIRECT false
 38#define CAN_REDIRECT true
 39
 40static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 41		       struct dst_entry *dst, bool can_redirect)
 42{
 43	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 44	int ret;
 45
 46	/* Disabling BH is needed to protect per-CPU bpf_redirect_info between
 47	 * BPF prog and skb_do_redirect().
 48	 */
 49	local_bh_disable();
 50	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 51	bpf_compute_data_pointers(skb);
 52	ret = bpf_prog_run_save_cb(lwt->prog, skb);
 53
 54	switch (ret) {
 55	case BPF_OK:
 56	case BPF_LWT_REROUTE:
 57		break;
 58
 59	case BPF_REDIRECT:
 60		if (unlikely(!can_redirect)) {
 61			pr_warn_once("Illegal redirect return code in prog %s\n",
 62				     lwt->name ? : "<unknown>");
 63			ret = BPF_OK;
 64		} else {
 65			skb_reset_mac_header(skb);
 66			skb_do_redirect(skb);
 67			ret = BPF_REDIRECT;
 68		}
 69		break;
 70
 71	case BPF_DROP:
 72		kfree_skb(skb);
 73		ret = -EPERM;
 74		break;
 75
 76	default:
 77		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
 78		kfree_skb(skb);
 79		ret = -EINVAL;
 80		break;
 81	}
 82
 83	bpf_net_ctx_clear(bpf_net_ctx);
 84	local_bh_enable();
 85
 86	return ret;
 87}
 88
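run_lwt_bpf() is the common dispatcher for all three hooks: it runs the
attached program with BH disabled and maps its return code onto the caller's
behaviour (BPF_OK continues, BPF_DROP frees the skb and becomes -EPERM,
BPF_REDIRECT is honoured only when can_redirect is set, BPF_LWT_REROUTE asks
the caller to redo the route lookup). A minimal program for the input hook
might look like the sketch below; the section and function names are
illustrative assumptions, not part of this file.

/* Hypothetical lwt_in program; names are assumptions for illustration. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_in")
int drop_oversized(struct __sk_buff *skb)
{
	if (skb->len > 1500)
		return BPF_DROP;	/* run_lwt_bpf() frees the skb, returns -EPERM */
	return BPF_OK;			/* bpf_input() falls through to orig_input() */
}

char _license[] SEC("license") = "GPL";
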
 89static int bpf_lwt_input_reroute(struct sk_buff *skb)
 90{
 91	enum skb_drop_reason reason;
 92	int err = -EINVAL;
 93
 94	if (skb->protocol == htons(ETH_P_IP)) {
 95		struct net_device *dev = skb_dst(skb)->dev;
 96		const struct iphdr *iph = ip_hdr(skb);
 97
 98		dev_hold(dev);
 99		skb_dst_drop(skb);
100		reason = ip_route_input_noref(skb, iph->daddr, iph->saddr,
101					      ip4h_dscp(iph), dev);
102		err = reason ? -EINVAL : 0;
103		dev_put(dev);
104	} else if (skb->protocol == htons(ETH_P_IPV6)) {
105		skb_dst_drop(skb);
106		err = ipv6_stub->ipv6_route_input(skb);
107	} else {
108		err = -EAFNOSUPPORT;
109	}
110
111	if (err)
112		goto err;
113	return dst_input(skb);
114
115err:
116	kfree_skb(skb);
117	return err;
118}
119
120static int bpf_input(struct sk_buff *skb)
121{
122	struct dst_entry *dst = skb_dst(skb);
123	struct bpf_lwt *bpf;
124	int ret;
125
126	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
127	if (bpf->in.prog) {
128		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
129		if (ret < 0)
130			return ret;
131		if (ret == BPF_LWT_REROUTE)
132			return bpf_lwt_input_reroute(skb);
133	}
134
135	if (unlikely(!dst->lwtstate->orig_input)) {
136		kfree_skb(skb);
137		return -EINVAL;
138	}
139
140	return dst->lwtstate->orig_input(skb);
141}
142
143static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
144{
145	struct dst_entry *dst = skb_dst(skb);
146	struct bpf_lwt *bpf;
147	int ret;
148
149	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
150	if (bpf->out.prog) {
151		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
152		if (ret < 0)
153			return ret;
154	}
155
156	if (unlikely(!dst->lwtstate->orig_output)) {
157		pr_warn_once("orig_output not set on dst for prog %s\n",
158			     bpf->out.name);
159		kfree_skb(skb);
160		return -EINVAL;
161	}
162
163	return dst->lwtstate->orig_output(net, sk, skb);
164}
165
166static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
167{
168	if (skb_headroom(skb) < hh_len) {
169		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
170
171		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
172			return -ENOMEM;
173	}
174
175	return 0;
176}
177
178static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
179{
180	struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
181	int oif = l3mdev ? l3mdev->ifindex : 0;
182	struct dst_entry *dst = NULL;
183	int err = -EAFNOSUPPORT;
184	struct sock *sk;
185	struct net *net;
186	bool ipv4;
187
188	if (skb->protocol == htons(ETH_P_IP))
189		ipv4 = true;
190	else if (skb->protocol == htons(ETH_P_IPV6))
191		ipv4 = false;
192	else
193		goto err;
194
195	sk = sk_to_full_sk(skb->sk);
196	if (sk) {
197		if (sk->sk_bound_dev_if)
198			oif = sk->sk_bound_dev_if;
199		net = sock_net(sk);
200	} else {
201		net = dev_net(skb_dst(skb)->dev);
202	}
203
204	if (ipv4) {
205		struct iphdr *iph = ip_hdr(skb);
206		struct flowi4 fl4 = {};
207		struct rtable *rt;
208
209		fl4.flowi4_oif = oif;
210		fl4.flowi4_mark = skb->mark;
211		fl4.flowi4_uid = sock_net_uid(net, sk);
212		fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
213		fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
214		fl4.flowi4_proto = iph->protocol;
215		fl4.daddr = iph->daddr;
216		fl4.saddr = iph->saddr;
217
218		rt = ip_route_output_key(net, &fl4);
219		if (IS_ERR(rt)) {
220			err = PTR_ERR(rt);
221			goto err;
222		}
223		dst = &rt->dst;
224	} else {
225		struct ipv6hdr *iph6 = ipv6_hdr(skb);
226		struct flowi6 fl6 = {};
227
228		fl6.flowi6_oif = oif;
229		fl6.flowi6_mark = skb->mark;
230		fl6.flowi6_uid = sock_net_uid(net, sk);
231		fl6.flowlabel = ip6_flowinfo(iph6);
232		fl6.flowi6_proto = iph6->nexthdr;
233		fl6.daddr = iph6->daddr;
234		fl6.saddr = iph6->saddr;
235
236		dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL);
237		if (IS_ERR(dst)) {
238			err = PTR_ERR(dst);
239			goto err;
240		}
241	}
242	if (unlikely(dst->error)) {
243		err = dst->error;
244		dst_release(dst);
245		goto err;
246	}
247
248	/* Although skb header was reserved in bpf_lwt_push_ip_encap(), it
249	 * was done for the previous dst, so we are doing it here again, in
250	 * case the new dst needs much more space. The call below is a noop
251	 * if there is enough header space in skb.
252	 */
253	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
254	if (unlikely(err))
255		goto err;
256
257	skb_dst_drop(skb);
258	skb_dst_set(skb, dst);
259
260	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
261	if (unlikely(err))
262		return net_xmit_errno(err);
263
264	/* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */
265	return LWTUNNEL_XMIT_DONE;
266
267err:
268	kfree_skb(skb);
269	return err;
270}
271
272static int bpf_xmit(struct sk_buff *skb)
273{
274	struct dst_entry *dst = skb_dst(skb);
275	struct bpf_lwt *bpf;
276
277	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
278	if (bpf->xmit.prog) {
279		int hh_len = dst->dev->hard_header_len;
280		__be16 proto = skb->protocol;
281		int ret;
282
283		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
284		switch (ret) {
285		case BPF_OK:
286			/* If the header changed, e.g. via bpf_lwt_push_encap,
287			 * BPF_LWT_REROUTE below should have been used if the
288			 * protocol was also changed.
289			 */
290			if (skb->protocol != proto) {
291				kfree_skb(skb);
292				return -EINVAL;
293			}
294			/* If the header was expanded, headroom might be too
295			 * small for L2 header to come, expand as needed.
296			 */
297			ret = xmit_check_hhlen(skb, hh_len);
298			if (unlikely(ret))
299				return ret;
300
301			return LWTUNNEL_XMIT_CONTINUE;
302		case BPF_REDIRECT:
303			return LWTUNNEL_XMIT_DONE;
304		case BPF_LWT_REROUTE:
305			return bpf_lwt_xmit_reroute(skb);
306		default:
307			return ret;
308		}
309	}
310
311	return LWTUNNEL_XMIT_CONTINUE;
312}
313
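bpf_xmit() is the only caller that passes CAN_REDIRECT, so only an lwt_xmit
program may usefully return BPF_REDIRECT: run_lwt_bpf() then invokes
skb_do_redirect() and bpf_xmit() reports LWTUNNEL_XMIT_DONE so the stack does
not transmit the skb a second time. A hedged sketch of such a program, with an
assumed target ifindex, could be:

/* Hypothetical lwt_xmit program; TARGET_IFINDEX is an assumption. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define TARGET_IFINDEX 4

SEC("lwt_xmit")
int redirect_xmit(struct __sk_buff *skb)
{
	/* bpf_redirect() records the target in the per-CPU redirect info
	 * that skb_do_redirect() in run_lwt_bpf() consumes afterwards.
	 */
	bpf_redirect(TARGET_IFINDEX, 0);
	return BPF_REDIRECT;
}

char _license[] SEC("license") = "GPL";
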
314static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
315{
316	if (prog->prog)
317		bpf_prog_put(prog->prog);
318
319	kfree(prog->name);
320}
321
322static void bpf_destroy_state(struct lwtunnel_state *lwt)
323{
324	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);
325
326	bpf_lwt_prog_destroy(&bpf->in);
327	bpf_lwt_prog_destroy(&bpf->out);
328	bpf_lwt_prog_destroy(&bpf->xmit);
329}
330
331static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
332	[LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
333	[LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
334				.len = MAX_PROG_NAME },
335};
336
337static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
338			  enum bpf_prog_type type)
339{
340	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
341	struct bpf_prog *p;
342	int ret;
343	u32 fd;
344
345	ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
346					  bpf_prog_policy, NULL);
347	if (ret < 0)
348		return ret;
349
350	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
351		return -EINVAL;
352
353	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
354	if (!prog->name)
355		return -ENOMEM;
356
357	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
358	p = bpf_prog_get_type(fd, type);
359	if (IS_ERR(p))
360		return PTR_ERR(p);
361
362	prog->prog = p;
363
364	return 0;
365}
366
367static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
368	[LWT_BPF_IN]		= { .type = NLA_NESTED, },
369	[LWT_BPF_OUT]		= { .type = NLA_NESTED, },
370	[LWT_BPF_XMIT]		= { .type = NLA_NESTED, },
371	[LWT_BPF_XMIT_HEADROOM]	= { .type = NLA_U32 },
372};
373
374static int bpf_build_state(struct net *net, struct nlattr *nla,
375			   unsigned int family, const void *cfg,
376			   struct lwtunnel_state **ts,
377			   struct netlink_ext_ack *extack)
378{
379	struct nlattr *tb[LWT_BPF_MAX + 1];
380	struct lwtunnel_state *newts;
381	struct bpf_lwt *bpf;
382	int ret;
383
384	if (family != AF_INET && family != AF_INET6)
385		return -EAFNOSUPPORT;
386
387	ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
388					  extack);
389	if (ret < 0)
390		return ret;
391
392	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
393		return -EINVAL;
394
395	newts = lwtunnel_state_alloc(sizeof(*bpf));
396	if (!newts)
397		return -ENOMEM;
398
399	newts->type = LWTUNNEL_ENCAP_BPF;
400	bpf = bpf_lwt_lwtunnel(newts);
401
402	if (tb[LWT_BPF_IN]) {
403		newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
404		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
405				     BPF_PROG_TYPE_LWT_IN);
406		if (ret  < 0)
407			goto errout;
408	}
409
410	if (tb[LWT_BPF_OUT]) {
411		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
412		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
413				     BPF_PROG_TYPE_LWT_OUT);
414		if (ret < 0)
415			goto errout;
416	}
417
418	if (tb[LWT_BPF_XMIT]) {
419		newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
420		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
421				     BPF_PROG_TYPE_LWT_XMIT);
422		if (ret < 0)
423			goto errout;
424	}
425
426	if (tb[LWT_BPF_XMIT_HEADROOM]) {
427		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);
428
429		if (headroom > LWT_BPF_MAX_HEADROOM) {
430			ret = -ERANGE;
431			goto errout;
432		}
433
434		newts->headroom = headroom;
435	}
436
437	bpf->family = family;
438	*ts = newts;
439
440	return 0;
441
442errout:
443	bpf_destroy_state(newts);
444	kfree(newts);
445	return ret;
446}
447
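bpf_build_state() expects each hook's program as an already-loaded file
descriptor in LWT_BPF_PROG_FD plus a name in LWT_BPF_PROG_NAME, nested under
LWT_BPF_IN, LWT_BPF_OUT or LWT_BPF_XMIT; in practice these attributes are
typically built by iproute2's "ip route ... encap bpf" support. A hedged
userspace sketch for obtaining such a descriptor with libbpf follows; the
object path and program name are assumptions.

/* Hypothetical userspace helper: load an object and return the prog fd
 * that would go into LWT_BPF_PROG_FD. Path and name are assumptions.
 */
#include <bpf/libbpf.h>

static int get_lwt_prog_fd(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open_file("lwt_xmit.bpf.o", NULL);
	if (!obj)
		return -1;
	if (bpf_object__load(obj))
		return -1;
	prog = bpf_object__find_program_by_name(obj, "redirect_xmit");
	if (!prog)
		return -1;
	return bpf_program__fd(prog);	/* value for LWT_BPF_PROG_FD */
}
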
448static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
449			     struct bpf_lwt_prog *prog)
450{
451	struct nlattr *nest;
452
453	if (!prog->prog)
454		return 0;
455
456	nest = nla_nest_start_noflag(skb, attr);
457	if (!nest)
458		return -EMSGSIZE;
459
460	if (prog->name &&
461	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
462		return -EMSGSIZE;
463
464	return nla_nest_end(skb, nest);
465}
466
467static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
468{
469	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);
470
471	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
472	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
473	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
474		return -EMSGSIZE;
475
476	return 0;
477}
478
479static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
480{
481	int nest_len = nla_total_size(sizeof(struct nlattr)) +
482		       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
483		       0;
484
485	return nest_len + /* LWT_BPF_IN */
486	       nest_len + /* LWT_BPF_OUT */
487	       nest_len + /* LWT_BPF_XMIT */
488	       0;
489}
490
491static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
492{
493	/* FIXME:
494	 * The LWT state is currently rebuilt for delete requests which
495	 * results in a new bpf_prog instance. Comparing names for now.
496	 */
497	if (!a->name && !b->name)
498		return 0;
499
500	if (!a->name || !b->name)
501		return 1;
502
503	return strcmp(a->name, b->name);
504}
505
506static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
507{
508	struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
509	struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);
510
511	return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
512	       bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
513	       bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
514}
515
516static const struct lwtunnel_encap_ops bpf_encap_ops = {
517	.build_state	= bpf_build_state,
518	.destroy_state	= bpf_destroy_state,
519	.input		= bpf_input,
520	.output		= bpf_output,
521	.xmit		= bpf_xmit,
522	.fill_encap	= bpf_fill_encap_info,
523	.get_encap_size = bpf_encap_nlsize,
524	.cmp_encap	= bpf_encap_cmp,
525	.owner		= THIS_MODULE,
526};
527
528static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
529			   int encap_len)
530{
531	struct skb_shared_info *shinfo = skb_shinfo(skb);
532
533	gso_type |= SKB_GSO_DODGY;
534	shinfo->gso_type |= gso_type;
535	skb_decrease_gso_size(shinfo, encap_len);
536	shinfo->gso_segs = 0;
537	return 0;
538}
539
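handle_gso_type() shrinks gso_size by the pushed header length so that
segmentation still yields packets no larger than before the encap: for
example, a TCP GSO skb with gso_size 1448 that gains a 20-byte outer IPv4
header ends up with gso_size 1428, so each resegmented packet, including the
extra 20 encap bytes, is no larger on the wire than a segment cut from the
original gso_size; gso_segs is reset to 0 to be recomputed, and SKB_GSO_DODGY
tells offloading devices and the stack to revalidate the GSO metadata.
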
540static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
541{
542	int next_hdr_offset;
543	void *next_hdr;
544	__u8 protocol;
545
546	/* SCTP and UDP_L4 gso need more nuanced handling than what
547	 * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
548	 * So at the moment only TCP GSO packets are let through.
549	 */
550	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
551		return -ENOTSUPP;
552
553	if (ipv4) {
554		protocol = ip_hdr(skb)->protocol;
555		next_hdr_offset = sizeof(struct iphdr);
556		next_hdr = skb_network_header(skb) + next_hdr_offset;
557	} else {
558		protocol = ipv6_hdr(skb)->nexthdr;
559		next_hdr_offset = sizeof(struct ipv6hdr);
560		next_hdr = skb_network_header(skb) + next_hdr_offset;
561	}
562
563	switch (protocol) {
564	case IPPROTO_GRE:
565		next_hdr_offset += sizeof(struct gre_base_hdr);
566		if (next_hdr_offset > encap_len)
567			return -EINVAL;
568
569		if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
570			return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
571					       encap_len);
572		return handle_gso_type(skb, SKB_GSO_GRE, encap_len);
573
574	case IPPROTO_UDP:
575		next_hdr_offset += sizeof(struct udphdr);
576		if (next_hdr_offset > encap_len)
577			return -EINVAL;
578
579		if (((struct udphdr *)next_hdr)->check)
580			return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
581					       encap_len);
582		return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);
583
584	case IPPROTO_IP:
585	case IPPROTO_IPV6:
586		if (ipv4)
587			return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
588		else
589			return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);
590
591	default:
592		return -EPROTONOSUPPORT;
593	}
594}
595
596int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
597{
598	struct iphdr *iph;
599	bool ipv4;
600	int err;
601
602	if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
603		return -EINVAL;
604
605	/* validate protocol and length */
606	iph = (struct iphdr *)hdr;
607	if (iph->version == 4) {
608		ipv4 = true;
609		if (unlikely(len < iph->ihl * 4))
610			return -EINVAL;
611	} else if (iph->version == 6) {
612		ipv4 = false;
613		if (unlikely(len < sizeof(struct ipv6hdr)))
614			return -EINVAL;
615	} else {
616		return -EINVAL;
617	}
618
619	if (ingress)
620		err = skb_cow_head(skb, len + skb->mac_len);
621	else
622		err = skb_cow_head(skb,
623				   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
624	if (unlikely(err))
625		return err;
626
627	/* push the encap headers and fix pointers */
628	skb_reset_inner_headers(skb);
629	skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
630	skb_set_inner_protocol(skb, skb->protocol);
631	skb->encapsulation = 1;
632	skb_push(skb, len);
633	if (ingress)
634		skb_postpush_rcsum(skb, iph, len);
635	skb_reset_network_header(skb);
636	memcpy(skb_network_header(skb), hdr, len);
637	bpf_compute_data_pointers(skb);
638	skb_clear_hash(skb);
639
640	if (ipv4) {
641		skb->protocol = htons(ETH_P_IP);
642		iph = ip_hdr(skb);
643
644		if (!iph->check)
645			iph->check = ip_fast_csum((unsigned char *)iph,
646						  iph->ihl);
647	} else {
648		skb->protocol = htons(ETH_P_IPV6);
649	}
650
651	if (skb_is_gso(skb))
652		return handle_gso_encap(skb, ipv4, len);
653
654	return 0;
655}
656
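bpf_lwt_push_ip_encap() is the kernel side of the bpf_lwt_push_encap() helper
in BPF_LWT_ENCAP_IP mode: the program supplies a complete outer IPv4 or IPv6
header, this function grows headroom, pushes and validates it, and the program
is then expected to return BPF_LWT_REROUTE so the reroute paths above find a
route for the new outer header. A hedged sketch of the BPF side, with made-up
tunnel endpoints and an IPIP outer header chosen purely for illustration:

/* Hypothetical lwt_xmit program pushing an outer IPv4 header; addresses
 * and names are assumptions. A real program would also fill in fields
 * such as tot_len before pushing the header.
 */
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("lwt_xmit")
int push_ipip(struct __sk_buff *skb)
{
	struct iphdr hdr = {
		.version  = 4,
		.ihl	  = 5,			/* 20-byte header, no options */
		.ttl	  = 64,
		.protocol = IPPROTO_IPIP,	/* IPv4 in IPv4 */
		.saddr	  = bpf_htonl(0x0a000001),	/* 10.0.0.1 */
		.daddr	  = bpf_htonl(0x0a000002),	/* 10.0.0.2 */
	};

	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr, sizeof(hdr)))
		return BPF_DROP;

	/* The old route no longer matches the new outer header. */
	return BPF_LWT_REROUTE;
}

char _license[] SEC("license") = "GPL";
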
657static int __init bpf_lwt_init(void)
658{
659	return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
660}
661
662subsys_initcall(bpf_lwt_init)
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
  3 */
  4
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/skbuff.h>
  8#include <linux/types.h>
  9#include <linux/bpf.h>
 10#include <net/lwtunnel.h>
 11#include <net/gre.h>
 12#include <net/ip6_route.h>
 13#include <net/ipv6_stubs.h>
 14
 15struct bpf_lwt_prog {
 16	struct bpf_prog *prog;
 17	char *name;
 18};
 19
 20struct bpf_lwt {
 21	struct bpf_lwt_prog in;
 22	struct bpf_lwt_prog out;
 23	struct bpf_lwt_prog xmit;
 24	int family;
 25};
 26
 27#define MAX_PROG_NAME 256
 28
 29static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
 30{
 31	return (struct bpf_lwt *)lwt->data;
 32}
 33
 34#define NO_REDIRECT false
 35#define CAN_REDIRECT true
 36
 37static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 38		       struct dst_entry *dst, bool can_redirect)
 39{
 40	int ret;
 41
 42	/* Preempt disable is needed to protect per-cpu redirect_info between
 43	 * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
 44	 * access to maps strictly require a rcu_read_lock() for protection,
 45	 * mixing with BH RCU lock doesn't work.
 46	 */
 47	preempt_disable();
 48	bpf_compute_data_pointers(skb);
 49	ret = bpf_prog_run_save_cb(lwt->prog, skb);
 50
 51	switch (ret) {
 52	case BPF_OK:
 53	case BPF_LWT_REROUTE:
 54		break;
 55
 56	case BPF_REDIRECT:
 57		if (unlikely(!can_redirect)) {
 58			pr_warn_once("Illegal redirect return code in prog %s\n",
 59				     lwt->name ? : "<unknown>");
 60			ret = BPF_OK;
 61		} else {
 62			skb_reset_mac_header(skb);
 63			ret = skb_do_redirect(skb);
 64			if (ret == 0)
 65				ret = BPF_REDIRECT;
 66		}
 67		break;
 68
 69	case BPF_DROP:
 70		kfree_skb(skb);
 71		ret = -EPERM;
 72		break;
 73
 74	default:
 75		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
 76		kfree_skb(skb);
 77		ret = -EINVAL;
 78		break;
 79	}
 80
 81	preempt_enable();
 82
 83	return ret;
 84}
 85
 86static int bpf_lwt_input_reroute(struct sk_buff *skb)
 87{
 88	int err = -EINVAL;
 89
 90	if (skb->protocol == htons(ETH_P_IP)) {
 91		struct net_device *dev = skb_dst(skb)->dev;
 92		struct iphdr *iph = ip_hdr(skb);
 93
 94		dev_hold(dev);
 95		skb_dst_drop(skb);
 96		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 97					   iph->tos, dev);
 98		dev_put(dev);
 99	} else if (skb->protocol == htons(ETH_P_IPV6)) {
100		skb_dst_drop(skb);
101		err = ipv6_stub->ipv6_route_input(skb);
102	} else {
103		err = -EAFNOSUPPORT;
104	}
105
106	if (err)
107		goto err;
108	return dst_input(skb);
109
110err:
111	kfree_skb(skb);
112	return err;
113}
114
115static int bpf_input(struct sk_buff *skb)
116{
117	struct dst_entry *dst = skb_dst(skb);
118	struct bpf_lwt *bpf;
119	int ret;
120
121	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
122	if (bpf->in.prog) {
123		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
124		if (ret < 0)
125			return ret;
126		if (ret == BPF_LWT_REROUTE)
127			return bpf_lwt_input_reroute(skb);
128	}
129
130	if (unlikely(!dst->lwtstate->orig_input)) {
131		kfree_skb(skb);
132		return -EINVAL;
133	}
134
135	return dst->lwtstate->orig_input(skb);
136}
137
138static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
139{
140	struct dst_entry *dst = skb_dst(skb);
141	struct bpf_lwt *bpf;
142	int ret;
143
144	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
145	if (bpf->out.prog) {
146		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
147		if (ret < 0)
148			return ret;
149	}
150
151	if (unlikely(!dst->lwtstate->orig_output)) {
152		pr_warn_once("orig_output not set on dst for prog %s\n",
153			     bpf->out.name);
154		kfree_skb(skb);
155		return -EINVAL;
156	}
157
158	return dst->lwtstate->orig_output(net, sk, skb);
159}
160
161static int xmit_check_hhlen(struct sk_buff *skb)
162{
163	int hh_len = skb_dst(skb)->dev->hard_header_len;
164
165	if (skb_headroom(skb) < hh_len) {
166		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
167
168		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
169			return -ENOMEM;
170	}
171
172	return 0;
173}
174
175static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
176{
177	struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
178	int oif = l3mdev ? l3mdev->ifindex : 0;
179	struct dst_entry *dst = NULL;
180	int err = -EAFNOSUPPORT;
181	struct sock *sk;
182	struct net *net;
183	bool ipv4;
184
185	if (skb->protocol == htons(ETH_P_IP))
186		ipv4 = true;
187	else if (skb->protocol == htons(ETH_P_IPV6))
188		ipv4 = false;
189	else
190		goto err;
191
192	sk = sk_to_full_sk(skb->sk);
193	if (sk) {
194		if (sk->sk_bound_dev_if)
195			oif = sk->sk_bound_dev_if;
196		net = sock_net(sk);
197	} else {
198		net = dev_net(skb_dst(skb)->dev);
199	}
200
201	if (ipv4) {
202		struct iphdr *iph = ip_hdr(skb);
203		struct flowi4 fl4 = {};
204		struct rtable *rt;
205
206		fl4.flowi4_oif = oif;
207		fl4.flowi4_mark = skb->mark;
208		fl4.flowi4_uid = sock_net_uid(net, sk);
209		fl4.flowi4_tos = RT_TOS(iph->tos);
210		fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
211		fl4.flowi4_proto = iph->protocol;
212		fl4.daddr = iph->daddr;
213		fl4.saddr = iph->saddr;
214
215		rt = ip_route_output_key(net, &fl4);
216		if (IS_ERR(rt)) {
217			err = PTR_ERR(rt);
218			goto err;
219		}
220		dst = &rt->dst;
221	} else {
222		struct ipv6hdr *iph6 = ipv6_hdr(skb);
223		struct flowi6 fl6 = {};
224
225		fl6.flowi6_oif = oif;
226		fl6.flowi6_mark = skb->mark;
227		fl6.flowi6_uid = sock_net_uid(net, sk);
228		fl6.flowlabel = ip6_flowinfo(iph6);
229		fl6.flowi6_proto = iph6->nexthdr;
230		fl6.daddr = iph6->daddr;
231		fl6.saddr = iph6->saddr;
232
233		err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
234		if (unlikely(err))
235			goto err;
236		if (IS_ERR(dst)) {
237			err = PTR_ERR(dst);
238			goto err;
239		}
240	}
241	if (unlikely(dst->error)) {
242		err = dst->error;
243		dst_release(dst);
244		goto err;
245	}
246
247	/* Although skb header was reserved in bpf_lwt_push_ip_encap(), it
248	 * was done for the previous dst, so we are doing it here again, in
249	 * case the new dst needs much more space. The call below is a noop
250	 * if there is enough header space in skb.
251	 */
252	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
253	if (unlikely(err))
254		goto err;
255
256	skb_dst_drop(skb);
257	skb_dst_set(skb, dst);
258
259	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
260	if (unlikely(err))
261		return err;
262
263	/* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */
264	return LWTUNNEL_XMIT_DONE;
265
266err:
267	kfree_skb(skb);
268	return err;
269}
270
271static int bpf_xmit(struct sk_buff *skb)
272{
273	struct dst_entry *dst = skb_dst(skb);
274	struct bpf_lwt *bpf;
275
276	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
277	if (bpf->xmit.prog) {
278		__be16 proto = skb->protocol;
279		int ret;
280
281		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
282		switch (ret) {
283		case BPF_OK:
284			/* If the header changed, e.g. via bpf_lwt_push_encap,
285			 * BPF_LWT_REROUTE below should have been used if the
286			 * protocol was also changed.
287			 */
288			if (skb->protocol != proto) {
289				kfree_skb(skb);
290				return -EINVAL;
291			}
292			/* If the header was expanded, headroom might be too
293			 * small for L2 header to come, expand as needed.
294			 */
295			ret = xmit_check_hhlen(skb);
296			if (unlikely(ret))
297				return ret;
298
299			return LWTUNNEL_XMIT_CONTINUE;
300		case BPF_REDIRECT:
301			return LWTUNNEL_XMIT_DONE;
302		case BPF_LWT_REROUTE:
303			return bpf_lwt_xmit_reroute(skb);
304		default:
305			return ret;
306		}
307	}
308
309	return LWTUNNEL_XMIT_CONTINUE;
310}
311
312static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
313{
314	if (prog->prog)
315		bpf_prog_put(prog->prog);
316
317	kfree(prog->name);
318}
319
320static void bpf_destroy_state(struct lwtunnel_state *lwt)
321{
322	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);
323
324	bpf_lwt_prog_destroy(&bpf->in);
325	bpf_lwt_prog_destroy(&bpf->out);
326	bpf_lwt_prog_destroy(&bpf->xmit);
327}
328
329static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
330	[LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
331	[LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
332				.len = MAX_PROG_NAME },
333};
334
335static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
336			  enum bpf_prog_type type)
337{
338	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
339	struct bpf_prog *p;
340	int ret;
341	u32 fd;
342
343	ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
344					  bpf_prog_policy, NULL);
345	if (ret < 0)
346		return ret;
347
348	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
349		return -EINVAL;
350
351	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
352	if (!prog->name)
353		return -ENOMEM;
354
355	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
356	p = bpf_prog_get_type(fd, type);
357	if (IS_ERR(p))
358		return PTR_ERR(p);
359
360	prog->prog = p;
361
362	return 0;
363}
364
365static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
366	[LWT_BPF_IN]		= { .type = NLA_NESTED, },
367	[LWT_BPF_OUT]		= { .type = NLA_NESTED, },
368	[LWT_BPF_XMIT]		= { .type = NLA_NESTED, },
369	[LWT_BPF_XMIT_HEADROOM]	= { .type = NLA_U32 },
370};
371
372static int bpf_build_state(struct nlattr *nla,
373			   unsigned int family, const void *cfg,
374			   struct lwtunnel_state **ts,
375			   struct netlink_ext_ack *extack)
376{
377	struct nlattr *tb[LWT_BPF_MAX + 1];
378	struct lwtunnel_state *newts;
379	struct bpf_lwt *bpf;
380	int ret;
381
382	if (family != AF_INET && family != AF_INET6)
383		return -EAFNOSUPPORT;
384
385	ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
386					  extack);
387	if (ret < 0)
388		return ret;
389
390	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
391		return -EINVAL;
392
393	newts = lwtunnel_state_alloc(sizeof(*bpf));
394	if (!newts)
395		return -ENOMEM;
396
397	newts->type = LWTUNNEL_ENCAP_BPF;
398	bpf = bpf_lwt_lwtunnel(newts);
399
400	if (tb[LWT_BPF_IN]) {
401		newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
402		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
403				     BPF_PROG_TYPE_LWT_IN);
404		if (ret  < 0)
405			goto errout;
406	}
407
408	if (tb[LWT_BPF_OUT]) {
409		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
410		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
411				     BPF_PROG_TYPE_LWT_OUT);
412		if (ret < 0)
413			goto errout;
414	}
415
416	if (tb[LWT_BPF_XMIT]) {
417		newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
418		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
419				     BPF_PROG_TYPE_LWT_XMIT);
420		if (ret < 0)
421			goto errout;
422	}
423
424	if (tb[LWT_BPF_XMIT_HEADROOM]) {
425		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);
426
427		if (headroom > LWT_BPF_MAX_HEADROOM) {
428			ret = -ERANGE;
429			goto errout;
430		}
431
432		newts->headroom = headroom;
433	}
434
435	bpf->family = family;
436	*ts = newts;
437
438	return 0;
439
440errout:
441	bpf_destroy_state(newts);
442	kfree(newts);
443	return ret;
444}
445
446static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
447			     struct bpf_lwt_prog *prog)
448{
449	struct nlattr *nest;
450
451	if (!prog->prog)
452		return 0;
453
454	nest = nla_nest_start_noflag(skb, attr);
455	if (!nest)
456		return -EMSGSIZE;
457
458	if (prog->name &&
459	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
460		return -EMSGSIZE;
461
462	return nla_nest_end(skb, nest);
463}
464
465static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
466{
467	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);
468
469	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
470	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
471	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
472		return -EMSGSIZE;
473
474	return 0;
475}
476
477static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
478{
479	int nest_len = nla_total_size(sizeof(struct nlattr)) +
480		       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
481		       0;
482
483	return nest_len + /* LWT_BPF_IN */
484	       nest_len + /* LWT_BPF_OUT */
485	       nest_len + /* LWT_BPF_XMIT */
486	       0;
487}
488
489static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
490{
491	/* FIXME:
492	 * The LWT state is currently rebuilt for delete requests which
493	 * results in a new bpf_prog instance. Comparing names for now.
494	 */
495	if (!a->name && !b->name)
496		return 0;
497
498	if (!a->name || !b->name)
499		return 1;
500
501	return strcmp(a->name, b->name);
502}
503
504static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
505{
506	struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
507	struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);
508
509	return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
510	       bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
511	       bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
512}
513
514static const struct lwtunnel_encap_ops bpf_encap_ops = {
515	.build_state	= bpf_build_state,
516	.destroy_state	= bpf_destroy_state,
517	.input		= bpf_input,
518	.output		= bpf_output,
519	.xmit		= bpf_xmit,
520	.fill_encap	= bpf_fill_encap_info,
521	.get_encap_size = bpf_encap_nlsize,
522	.cmp_encap	= bpf_encap_cmp,
523	.owner		= THIS_MODULE,
524};
525
526static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
527			   int encap_len)
528{
529	struct skb_shared_info *shinfo = skb_shinfo(skb);
530
531	gso_type |= SKB_GSO_DODGY;
532	shinfo->gso_type |= gso_type;
533	skb_decrease_gso_size(shinfo, encap_len);
534	shinfo->gso_segs = 0;
535	return 0;
536}
537
538static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
539{
540	int next_hdr_offset;
541	void *next_hdr;
542	__u8 protocol;
543
544	/* SCTP and UDP_L4 gso need more nuanced handling than what
545	 * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
546	 * So at the moment only TCP GSO packets are let through.
547	 */
548	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
549		return -ENOTSUPP;
550
551	if (ipv4) {
552		protocol = ip_hdr(skb)->protocol;
553		next_hdr_offset = sizeof(struct iphdr);
554		next_hdr = skb_network_header(skb) + next_hdr_offset;
555	} else {
556		protocol = ipv6_hdr(skb)->nexthdr;
557		next_hdr_offset = sizeof(struct ipv6hdr);
558		next_hdr = skb_network_header(skb) + next_hdr_offset;
559	}
560
561	switch (protocol) {
562	case IPPROTO_GRE:
563		next_hdr_offset += sizeof(struct gre_base_hdr);
564		if (next_hdr_offset > encap_len)
565			return -EINVAL;
566
567		if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
568			return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
569					       encap_len);
570		return handle_gso_type(skb, SKB_GSO_GRE, encap_len);
571
572	case IPPROTO_UDP:
573		next_hdr_offset += sizeof(struct udphdr);
574		if (next_hdr_offset > encap_len)
575			return -EINVAL;
576
577		if (((struct udphdr *)next_hdr)->check)
578			return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
579					       encap_len);
580		return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);
581
582	case IPPROTO_IP:
583	case IPPROTO_IPV6:
584		if (ipv4)
585			return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
586		else
587			return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);
588
589	default:
590		return -EPROTONOSUPPORT;
591	}
592}
593
594int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
595{
596	struct iphdr *iph;
597	bool ipv4;
598	int err;
599
600	if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
601		return -EINVAL;
602
603	/* validate protocol and length */
604	iph = (struct iphdr *)hdr;
605	if (iph->version == 4) {
606		ipv4 = true;
607		if (unlikely(len < iph->ihl * 4))
608			return -EINVAL;
609	} else if (iph->version == 6) {
610		ipv4 = false;
611		if (unlikely(len < sizeof(struct ipv6hdr)))
612			return -EINVAL;
613	} else {
614		return -EINVAL;
615	}
616
617	if (ingress)
618		err = skb_cow_head(skb, len + skb->mac_len);
619	else
620		err = skb_cow_head(skb,
621				   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
622	if (unlikely(err))
623		return err;
624
625	/* push the encap headers and fix pointers */
626	skb_reset_inner_headers(skb);
627	skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
628	skb_set_inner_protocol(skb, skb->protocol);
629	skb->encapsulation = 1;
630	skb_push(skb, len);
631	if (ingress)
632		skb_postpush_rcsum(skb, iph, len);
633	skb_reset_network_header(skb);
634	memcpy(skb_network_header(skb), hdr, len);
635	bpf_compute_data_pointers(skb);
636	skb_clear_hash(skb);
637
638	if (ipv4) {
639		skb->protocol = htons(ETH_P_IP);
640		iph = ip_hdr(skb);
641
642		if (!iph->check)
643			iph->check = ip_fast_csum((unsigned char *)iph,
644						  iph->ihl);
645	} else {
646		skb->protocol = htons(ETH_P_IPV6);
647	}
648
649	if (skb_is_gso(skb))
650		return handle_gso_encap(skb, ipv4, len);
651
652	return 0;
653}
654
655static int __init bpf_lwt_init(void)
656{
657	return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
658}
659
660subsys_initcall(bpf_lwt_init)