/*
 * tools/testing/selftests/bpf/prog_tests/flow_dissector.c
 * Capture from a kernel source browser; the v6.13.7 copy follows.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2#define _GNU_SOURCE
  3#include <test_progs.h>
  4#include <network_helpers.h>
 
  5#include <linux/if_tun.h>
  6#include <sys/uio.h>
  7
  8#include "bpf_flow.skel.h"
  9
 10#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
 11
 12#ifndef IP_MF
 13#define IP_MF 0x2000
 14#endif
 15
/*
 * Compare dissected flow keys 'got' against the 'expected' template with a
 * single memcmp and, on mismatch, print every field as a "got/expected" pair.
 * NOTE(review): expands to _CHECK(..., topts.duration, ...), so it can only
 * be used where a 'topts' (bpf_test_run_opts) variable is in scope.
 */
 16#define CHECK_FLOW_KEYS(desc, got, expected)				\
 17	_CHECK(memcmp(&got, &expected, sizeof(got)) != 0,		\
 18	      desc,							\
 19	      topts.duration,						\
 20	      "nhoff=%u/%u "						\
 21	      "thoff=%u/%u "						\
 22	      "addr_proto=0x%x/0x%x "					\
 23	      "is_frag=%u/%u "						\
 24	      "is_first_frag=%u/%u "					\
 25	      "is_encap=%u/%u "						\
 26	      "ip_proto=0x%x/0x%x "					\
 27	      "n_proto=0x%x/0x%x "					\
 28	      "flow_label=0x%x/0x%x "					\
 29	      "sport=%u/%u "						\
 30	      "dport=%u/%u\n",						\
 31	      got.nhoff, expected.nhoff,				\
 32	      got.thoff, expected.thoff,				\
 33	      got.addr_proto, expected.addr_proto,			\
 34	      got.is_frag, expected.is_frag,				\
 35	      got.is_first_frag, expected.is_first_frag,		\
 36	      got.is_encap, expected.is_encap,				\
 37	      got.ip_proto, expected.ip_proto,				\
 38	      got.n_proto, expected.n_proto,				\
 39	      got.flow_label, expected.flow_label,			\
 40	      got.sport, expected.sport,				\
 41	      got.dport, expected.dport)
 42
/* Plain Ethernet + IPv4 + TCP frame. */
 43struct ipv4_pkt {
 44	struct ethhdr eth;
 45	struct iphdr iph;
 46	struct tcphdr tcp;
 47} __packed;
 48
/* IPv4-in-IPv4 (IPIP) encapsulated TCP frame. */
 49struct ipip_pkt {
 50	struct ethhdr eth;
 51	struct iphdr iph;
 52	struct iphdr iph_inner;
 53	struct tcphdr tcp;
 54} __packed;
 55
/* Single 802.1Q VLAN tag in front of IPv4 + TCP. */
 56struct svlan_ipv4_pkt {
 57	struct ethhdr eth;
 58	__u16 vlan_tci;
 59	__u16 vlan_proto;
 60	struct iphdr iph;
 61	struct tcphdr tcp;
 62} __packed;
 63
/* Plain Ethernet + IPv6 + TCP frame. */
 64struct ipv6_pkt {
 65	struct ethhdr eth;
 66	struct ipv6hdr iph;
 67	struct tcphdr tcp;
 68} __packed;
 69
/* IPv6 with a fragment extension header between IPv6 and TCP. */
 70struct ipv6_frag_pkt {
 71	struct ethhdr eth;
 72	struct ipv6hdr iph;
 73	struct frag_hdr {
 74		__u8 nexthdr;
 75		__u8 reserved;
 76		__be16 frag_off;
 77		__be32 identification;
 78	} ipf;
 79	struct tcphdr tcp;
 80} __packed;
 81
/* Double-tagged (802.1ad outer + 802.1Q inner) IPv6 + TCP frame. */
 82struct dvlan_ipv6_pkt {
 83	struct ethhdr eth;
 84	__u16 vlan_tci;
 85	__u16 vlan_proto;
 86	__u16 vlan_tci2;
 87	__u16 vlan_proto2;
 88	struct ipv6hdr iph;
 89	struct tcphdr tcp;
 90} __packed;
 91
/*
 * One test vector: an input packet (union over the shapes above), the
 * bpf_flow_keys the dissector is expected to produce, optional input
 * dissector flags, and the expected program return value.
 */
 92struct test {
 93	const char *name;
 94	union {
 95		struct ipv4_pkt ipv4;
 96		struct svlan_ipv4_pkt svlan_ipv4;
 97		struct ipip_pkt ipip;
 98		struct ipv6_pkt ipv6;
 99		struct ipv6_frag_pkt ipv6_frag;
100		struct dvlan_ipv6_pkt dvlan_ipv6;
101	} pkt;
102	struct bpf_flow_keys keys;
103	__u32 flags;
104	__u32 retval;
105};
106
107#define VLAN_HLEN	4
108
109static __u32 duration;
110struct test tests[] = {
111	{
112		.name = "ipv4",
113		.pkt.ipv4 = {
114			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
115			.iph.ihl = 5,
116			.iph.protocol = IPPROTO_TCP,
117			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
118			.tcp.doff = 5,
119			.tcp.source = 80,
120			.tcp.dest = 8080,
121		},
122		.keys = {
123			.nhoff = ETH_HLEN,
124			.thoff = ETH_HLEN + sizeof(struct iphdr),
125			.addr_proto = ETH_P_IP,
126			.ip_proto = IPPROTO_TCP,
127			.n_proto = __bpf_constant_htons(ETH_P_IP),
128			.sport = 80,
129			.dport = 8080,
130		},
131		.retval = BPF_OK,
132	},
133	{
134		.name = "ipv6",
135		.pkt.ipv6 = {
136			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
137			.iph.nexthdr = IPPROTO_TCP,
138			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
139			.tcp.doff = 5,
140			.tcp.source = 80,
141			.tcp.dest = 8080,
142		},
143		.keys = {
144			.nhoff = ETH_HLEN,
145			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
146			.addr_proto = ETH_P_IPV6,
147			.ip_proto = IPPROTO_TCP,
148			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
149			.sport = 80,
150			.dport = 8080,
151		},
152		.retval = BPF_OK,
153	},
154	{
155		.name = "802.1q-ipv4",
156		.pkt.svlan_ipv4 = {
157			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
158			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
159			.iph.ihl = 5,
160			.iph.protocol = IPPROTO_TCP,
161			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
162			.tcp.doff = 5,
163			.tcp.source = 80,
164			.tcp.dest = 8080,
165		},
166		.keys = {
167			.nhoff = ETH_HLEN + VLAN_HLEN,
168			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
169			.addr_proto = ETH_P_IP,
170			.ip_proto = IPPROTO_TCP,
171			.n_proto = __bpf_constant_htons(ETH_P_IP),
172			.sport = 80,
173			.dport = 8080,
174		},
175		.retval = BPF_OK,
176	},
177	{
178		.name = "802.1ad-ipv6",
179		.pkt.dvlan_ipv6 = {
180			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
181			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
182			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
183			.iph.nexthdr = IPPROTO_TCP,
184			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
185			.tcp.doff = 5,
186			.tcp.source = 80,
187			.tcp.dest = 8080,
188		},
189		.keys = {
190			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
191			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
192				sizeof(struct ipv6hdr),
193			.addr_proto = ETH_P_IPV6,
194			.ip_proto = IPPROTO_TCP,
195			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
196			.sport = 80,
197			.dport = 8080,
198		},
199		.retval = BPF_OK,
200	},
201	{
202		.name = "ipv4-frag",
203		.pkt.ipv4 = {
204			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
205			.iph.ihl = 5,
206			.iph.protocol = IPPROTO_TCP,
207			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
208			.iph.frag_off = __bpf_constant_htons(IP_MF),
209			.tcp.doff = 5,
210			.tcp.source = 80,
211			.tcp.dest = 8080,
212		},
213		.keys = {
214			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
215			.nhoff = ETH_HLEN,
216			.thoff = ETH_HLEN + sizeof(struct iphdr),
217			.addr_proto = ETH_P_IP,
218			.ip_proto = IPPROTO_TCP,
219			.n_proto = __bpf_constant_htons(ETH_P_IP),
220			.is_frag = true,
221			.is_first_frag = true,
222			.sport = 80,
223			.dport = 8080,
224		},
225		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
226		.retval = BPF_OK,
227	},
228	{
229		.name = "ipv4-no-frag",
230		.pkt.ipv4 = {
231			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
232			.iph.ihl = 5,
233			.iph.protocol = IPPROTO_TCP,
234			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
235			.iph.frag_off = __bpf_constant_htons(IP_MF),
236			.tcp.doff = 5,
237			.tcp.source = 80,
238			.tcp.dest = 8080,
239		},
240		.keys = {
241			.nhoff = ETH_HLEN,
242			.thoff = ETH_HLEN + sizeof(struct iphdr),
243			.addr_proto = ETH_P_IP,
244			.ip_proto = IPPROTO_TCP,
245			.n_proto = __bpf_constant_htons(ETH_P_IP),
246			.is_frag = true,
247			.is_first_frag = true,
248		},
249		.retval = BPF_OK,
250	},
251	{
252		.name = "ipv6-frag",
253		.pkt.ipv6_frag = {
254			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
255			.iph.nexthdr = IPPROTO_FRAGMENT,
256			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
257			.ipf.nexthdr = IPPROTO_TCP,
258			.tcp.doff = 5,
259			.tcp.source = 80,
260			.tcp.dest = 8080,
261		},
262		.keys = {
263			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
264			.nhoff = ETH_HLEN,
265			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
266				sizeof(struct frag_hdr),
267			.addr_proto = ETH_P_IPV6,
268			.ip_proto = IPPROTO_TCP,
269			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
270			.is_frag = true,
271			.is_first_frag = true,
272			.sport = 80,
273			.dport = 8080,
274		},
275		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
276		.retval = BPF_OK,
277	},
278	{
279		.name = "ipv6-no-frag",
280		.pkt.ipv6_frag = {
281			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
282			.iph.nexthdr = IPPROTO_FRAGMENT,
283			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
284			.ipf.nexthdr = IPPROTO_TCP,
285			.tcp.doff = 5,
286			.tcp.source = 80,
287			.tcp.dest = 8080,
288		},
289		.keys = {
290			.nhoff = ETH_HLEN,
291			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
292				sizeof(struct frag_hdr),
293			.addr_proto = ETH_P_IPV6,
294			.ip_proto = IPPROTO_TCP,
295			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
296			.is_frag = true,
297			.is_first_frag = true,
298		},
299		.retval = BPF_OK,
300	},
301	{
302		.name = "ipv6-flow-label",
303		.pkt.ipv6 = {
304			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
305			.iph.nexthdr = IPPROTO_TCP,
306			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
307			.iph.flow_lbl = { 0xb, 0xee, 0xef },
308			.tcp.doff = 5,
309			.tcp.source = 80,
310			.tcp.dest = 8080,
311		},
312		.keys = {
313			.nhoff = ETH_HLEN,
314			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
315			.addr_proto = ETH_P_IPV6,
316			.ip_proto = IPPROTO_TCP,
317			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
318			.sport = 80,
319			.dport = 8080,
320			.flow_label = __bpf_constant_htonl(0xbeeef),
321		},
322		.retval = BPF_OK,
323	},
324	{
325		.name = "ipv6-no-flow-label",
326		.pkt.ipv6 = {
327			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
328			.iph.nexthdr = IPPROTO_TCP,
329			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
330			.iph.flow_lbl = { 0xb, 0xee, 0xef },
331			.tcp.doff = 5,
332			.tcp.source = 80,
333			.tcp.dest = 8080,
334		},
335		.keys = {
336			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
337			.nhoff = ETH_HLEN,
338			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
339			.addr_proto = ETH_P_IPV6,
340			.ip_proto = IPPROTO_TCP,
341			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
342			.flow_label = __bpf_constant_htonl(0xbeeef),
343		},
344		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
345		.retval = BPF_OK,
346	},
347	{
348		.name = "ipv6-empty-flow-label",
349		.pkt.ipv6 = {
350			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
351			.iph.nexthdr = IPPROTO_TCP,
352			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
353			.iph.flow_lbl = { 0x00, 0x00, 0x00 },
354			.tcp.doff = 5,
355			.tcp.source = 80,
356			.tcp.dest = 8080,
357		},
358		.keys = {
359			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
360			.nhoff = ETH_HLEN,
361			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
362			.addr_proto = ETH_P_IPV6,
363			.ip_proto = IPPROTO_TCP,
364			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
365			.sport = 80,
366			.dport = 8080,
367		},
368		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
369		.retval = BPF_OK,
370	},
371	{
372		.name = "ipip-encap",
373		.pkt.ipip = {
374			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
375			.iph.ihl = 5,
376			.iph.protocol = IPPROTO_IPIP,
377			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
378			.iph_inner.ihl = 5,
379			.iph_inner.protocol = IPPROTO_TCP,
380			.iph_inner.tot_len =
381				__bpf_constant_htons(MAGIC_BYTES -
382				sizeof(struct iphdr)),
383			.tcp.doff = 5,
384			.tcp.source = 80,
385			.tcp.dest = 8080,
386		},
387		.keys = {
388			.nhoff = ETH_HLEN,
389			.thoff = ETH_HLEN + sizeof(struct iphdr) +
390				sizeof(struct iphdr),
391			.addr_proto = ETH_P_IP,
392			.ip_proto = IPPROTO_TCP,
393			.n_proto = __bpf_constant_htons(ETH_P_IP),
394			.is_encap = true,
395			.sport = 80,
396			.dport = 8080,
397		},
398		.retval = BPF_OK,
399	},
400	{
401		.name = "ipip-no-encap",
402		.pkt.ipip = {
403			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
404			.iph.ihl = 5,
405			.iph.protocol = IPPROTO_IPIP,
406			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
407			.iph_inner.ihl = 5,
408			.iph_inner.protocol = IPPROTO_TCP,
409			.iph_inner.tot_len =
410				__bpf_constant_htons(MAGIC_BYTES -
411				sizeof(struct iphdr)),
412			.tcp.doff = 5,
413			.tcp.source = 80,
414			.tcp.dest = 8080,
415		},
416		.keys = {
417			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
418			.nhoff = ETH_HLEN,
419			.thoff = ETH_HLEN + sizeof(struct iphdr),
420			.addr_proto = ETH_P_IP,
421			.ip_proto = IPPROTO_IPIP,
422			.n_proto = __bpf_constant_htons(ETH_P_IP),
423			.is_encap = true,
424		},
425		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
426		.retval = BPF_OK,
427	},
428	{
429		.name = "ipip-encap-dissector-continue",
430		.pkt.ipip = {
431			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
432			.iph.ihl = 5,
433			.iph.protocol = IPPROTO_IPIP,
434			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
435			.iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR),
436			.iph_inner.ihl = 5,
437			.iph_inner.protocol = IPPROTO_TCP,
438			.iph_inner.tot_len =
439				__bpf_constant_htons(MAGIC_BYTES -
440				sizeof(struct iphdr)),
441			.tcp.doff = 5,
442			.tcp.source = 99,
443			.tcp.dest = 9090,
444		},
445		.retval = BPF_FLOW_DISSECTOR_CONTINUE,
446	},
447};
448
449static int create_tap(const char *ifname)
450{
451	struct ifreq ifr = {
452		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
453	};
454	int fd, ret;
455
456	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
457
458	fd = open("/dev/net/tun", O_RDWR);
459	if (fd < 0)
460		return -1;
461
462	ret = ioctl(fd, TUNSETIFF, &ifr);
463	if (ret)
464		return -1;
465
466	return fd;
467}
468
/*
 * Transmit one packet buffer to the tap device.  Uses a single-element
 * writev(), matching how the original sent the frame; returns writev()'s
 * result (bytes written, or -1 on error).
 */
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec vec = {
		.iov_base = pkt,
		.iov_len = len,
	};

	return writev(fd, &vec, 1);
}
479
480static int ifup(const char *ifname)
481{
482	struct ifreq ifr = {};
483	int sk, ret;
484
485	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
486
487	sk = socket(PF_INET, SOCK_DGRAM, 0);
488	if (sk < 0)
489		return -1;
490
491	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
492	if (ret) {
493		close(sk);
494		return -1;
495	}
496
497	ifr.ifr_flags |= IFF_UP;
498	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
499	if (ret) {
500		close(sk);
501		return -1;
502	}
503
504	close(sk);
505	return 0;
506}
507
508static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
509{
510	int i, err, map_fd, prog_fd;
511	struct bpf_program *prog;
512	char prog_name[32];
513
514	map_fd = bpf_map__fd(prog_array);
515	if (map_fd < 0)
516		return -1;
 
 
 
 
 
 
 
 
 
 
 
517
518	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
519		snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
 
 
 
520
521		prog = bpf_object__find_program_by_name(obj, prog_name);
522		if (!prog)
523			return -1;
524
525		prog_fd = bpf_program__fd(prog);
526		if (prog_fd < 0)
527			return -1;
528
529		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
530		if (err)
531			return -1;
532	}
533	return 0;
534}
535
/*
 * Drive the skb-less (eth_get_headlen) dissector path: write each eligible
 * test packet into the tap device, then read back the flow keys the BPF
 * program stored in the 'keys' map, keyed by sport<<16|dport, and compare
 * them against the expected keys.  Entries are deleted after each check so
 * later iterations start clean.
 */
536static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
537{
538	int i, err, keys_fd;


539
540	keys_fd = bpf_map__fd(keys);
541	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
542		return;




543
544	for (i = 0; i < ARRAY_SIZE(tests); i++) {
545		/* Keep in sync with 'flags' from eth_get_headlen. */
546		__u32 eth_get_headlen_flags =
547			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
		/* 'topts' exists only so CHECK_FLOW_KEYS can expand to
		 * topts.duration; no test_run is issued here.
		 */
548		LIBBPF_OPTS(bpf_test_run_opts, topts);
549		struct bpf_flow_keys flow_keys = {};
550		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
551			    tests[i].keys.dport;
552
553		/* For skb-less case we can't pass input flags; run
554		 * only the tests that have a matching set of flags.
555		 */
556
557		if (tests[i].flags != eth_get_headlen_flags)
558			continue;
559
560		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
561		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
562
563		/* check the stored flow_keys only if BPF_OK expected */
564		if (tests[i].retval != BPF_OK)
565			continue;
566
567		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
568		ASSERT_OK(err, "bpf_map_lookup_elem");
569

570		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
571
572		err = bpf_map_delete_elem(keys_fd, &key);
573		ASSERT_OK(err, "bpf_map_delete_elem");
574	}
575}
576
/*
 * Exercise the skb-less tests with the dissector attached via the legacy
 * bpf_prog_attach() API, detaching again with bpf_prog_detach2() afterwards.
 */
577static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
578{
579	int err, prog_fd;
580
581	prog_fd = bpf_program__fd(skel->progs._dissect);
582	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
583		return;
584
585	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
586	if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
587		return;
588
589	run_tests_skb_less(tap_fd, skel->maps.last_dissection);
590
591	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
592	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
593}
594
/*
 * Same skb-less tests, but attached indirectly: a bpf_link bound to the
 * current network namespace via bpf_program__attach_netns().
 */
595static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
596{
597	struct bpf_link *link;
598	int err, net_fd;
599
600	net_fd = open("/proc/self/ns/net", O_RDONLY);
601	if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
602		return;
603
604	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
605	if (!ASSERT_OK_PTR(link, "attach_netns"))
606		goto out_close;
607
608	run_tests_skb_less(tap_fd, skel->maps.last_dissection);
609
610	err = bpf_link__destroy(link);
611	CHECK(err, "bpf_link__destroy", "err %d\n", err);
612out_close:
613	close(net_fd);
614}
615
/*
 * Test entry point.  Phase 1 runs every vector in tests[] through
 * BPF_PROG_TEST_RUN (bpf_prog_test_run_opts), where input dissector flags
 * can be passed via ctx_in, and compares retval and the returned flow keys.
 * Phase 2 repeats the eligible vectors through a tap device (skb-less
 * path), once per attachment mechanism (direct prog attach and netns link).
 */
616void test_flow_dissector(void)
617{
618	int i, err, prog_fd, keys_fd = -1, tap_fd;
619	struct bpf_flow *skel;
620
621	skel = bpf_flow__open_and_load();
622	if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
623		return;
624
625	prog_fd = bpf_program__fd(skel->progs._dissect);
626	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
627		goto out_destroy_skel;
628	keys_fd = bpf_map__fd(skel->maps.last_dissection);
629	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
630		goto out_destroy_skel;
631	err = init_prog_array(skel->obj, skel->maps.jmp_table);
632	if (CHECK(err, "init_prog_array", "err %d\n", err))
633		goto out_destroy_skel;
634
635	for (i = 0; i < ARRAY_SIZE(tests); i++) {
636		struct bpf_flow_keys flow_keys;
637		LIBBPF_OPTS(bpf_test_run_opts, topts,
638			.data_in = &tests[i].pkt,
639			.data_size_in = sizeof(tests[i].pkt),
640			.data_out = &flow_keys,
641		);
		/* static: only handed to the kernel (and its .flags set)
		 * when the test specifies flags; otherwise no ctx is passed.
		 */
642		static struct bpf_flow_keys ctx = {};
643
644		if (tests[i].flags) {
645			topts.ctx_in = &ctx;
646			topts.ctx_size_in = sizeof(ctx);
647			ctx.flags = tests[i].flags;
648		}
649
650		err = bpf_prog_test_run_opts(prog_fd, &topts);
651		ASSERT_OK(err, "test_run");
652		ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval");
653
654		/* check the resulting flow_keys only if BPF_OK returned */
655		if (topts.retval != BPF_OK)
656			continue;
657		ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
658			  "test_run data_size_out");
659		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
660	}
661
662	/* Do the same tests but for skb-less flow dissector.
663	 * We use a known path in the net/tun driver that calls
664	 * eth_get_headlen and we manually export bpf_flow_keys
665	 * via BPF map in this case.
666	 */
667
668	tap_fd = create_tap("tap0");
669	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
670	err = ifup("tap0");
671	CHECK(err, "ifup", "err %d errno %d\n", err, errno);
672
673	/* Test direct prog attachment */
674	test_skb_less_prog_attach(skel, tap_fd);
675	/* Test indirect prog attachment via link */
676	test_skb_less_link_create(skel, tap_fd);
677
678	close(tap_fd);
679out_destroy_skel:
680	bpf_flow__destroy(skel);
681}
/*
 * Second copy of the same selftest, as shipped in kernel v5.4, follows.
 */
  1// SPDX-License-Identifier: GPL-2.0
 
  2#include <test_progs.h>
  3#include <error.h>
  4#include <linux/if.h>
  5#include <linux/if_tun.h>
  6#include <sys/uio.h>
  7
 
 
 
 
  8#ifndef IP_MF
  9#define IP_MF 0x2000
 10#endif
 11
 12#define CHECK_FLOW_KEYS(desc, got, expected)				\
 13	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\
 14	      desc,							\
 
 15	      "nhoff=%u/%u "						\
 16	      "thoff=%u/%u "						\
 17	      "addr_proto=0x%x/0x%x "					\
 18	      "is_frag=%u/%u "						\
 19	      "is_first_frag=%u/%u "					\
 20	      "is_encap=%u/%u "						\
 21	      "ip_proto=0x%x/0x%x "					\
 22	      "n_proto=0x%x/0x%x "					\
 23	      "flow_label=0x%x/0x%x "					\
 24	      "sport=%u/%u "						\
 25	      "dport=%u/%u\n",						\
 26	      got.nhoff, expected.nhoff,				\
 27	      got.thoff, expected.thoff,				\
 28	      got.addr_proto, expected.addr_proto,			\
 29	      got.is_frag, expected.is_frag,				\
 30	      got.is_first_frag, expected.is_first_frag,		\
 31	      got.is_encap, expected.is_encap,				\
 32	      got.ip_proto, expected.ip_proto,				\
 33	      got.n_proto, expected.n_proto,				\
 34	      got.flow_label, expected.flow_label,			\
 35	      got.sport, expected.sport,				\
 36	      got.dport, expected.dport)
 37
 38struct ipv4_pkt {
 39	struct ethhdr eth;
 40	struct iphdr iph;
 41	struct tcphdr tcp;
 42} __packed;
 43
 44struct ipip_pkt {
 45	struct ethhdr eth;
 46	struct iphdr iph;
 47	struct iphdr iph_inner;
 48	struct tcphdr tcp;
 49} __packed;
 50
 51struct svlan_ipv4_pkt {
 52	struct ethhdr eth;
 53	__u16 vlan_tci;
 54	__u16 vlan_proto;
 55	struct iphdr iph;
 56	struct tcphdr tcp;
 57} __packed;
 58
 59struct ipv6_pkt {
 60	struct ethhdr eth;
 61	struct ipv6hdr iph;
 62	struct tcphdr tcp;
 63} __packed;
 64
 65struct ipv6_frag_pkt {
 66	struct ethhdr eth;
 67	struct ipv6hdr iph;
 68	struct frag_hdr {
 69		__u8 nexthdr;
 70		__u8 reserved;
 71		__be16 frag_off;
 72		__be32 identification;
 73	} ipf;
 74	struct tcphdr tcp;
 75} __packed;
 76
 77struct dvlan_ipv6_pkt {
 78	struct ethhdr eth;
 79	__u16 vlan_tci;
 80	__u16 vlan_proto;
 81	__u16 vlan_tci2;
 82	__u16 vlan_proto2;
 83	struct ipv6hdr iph;
 84	struct tcphdr tcp;
 85} __packed;
 86
 87struct test {
 88	const char *name;
 89	union {
 90		struct ipv4_pkt ipv4;
 91		struct svlan_ipv4_pkt svlan_ipv4;
 92		struct ipip_pkt ipip;
 93		struct ipv6_pkt ipv6;
 94		struct ipv6_frag_pkt ipv6_frag;
 95		struct dvlan_ipv6_pkt dvlan_ipv6;
 96	} pkt;
 97	struct bpf_flow_keys keys;
 98	__u32 flags;
 
 99};
100
101#define VLAN_HLEN	4
102
 
103struct test tests[] = {
104	{
105		.name = "ipv4",
106		.pkt.ipv4 = {
107			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
108			.iph.ihl = 5,
109			.iph.protocol = IPPROTO_TCP,
110			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
111			.tcp.doff = 5,
112			.tcp.source = 80,
113			.tcp.dest = 8080,
114		},
115		.keys = {
116			.nhoff = ETH_HLEN,
117			.thoff = ETH_HLEN + sizeof(struct iphdr),
118			.addr_proto = ETH_P_IP,
119			.ip_proto = IPPROTO_TCP,
120			.n_proto = __bpf_constant_htons(ETH_P_IP),
121			.sport = 80,
122			.dport = 8080,
123		},
 
124	},
125	{
126		.name = "ipv6",
127		.pkt.ipv6 = {
128			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
129			.iph.nexthdr = IPPROTO_TCP,
130			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
131			.tcp.doff = 5,
132			.tcp.source = 80,
133			.tcp.dest = 8080,
134		},
135		.keys = {
136			.nhoff = ETH_HLEN,
137			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
138			.addr_proto = ETH_P_IPV6,
139			.ip_proto = IPPROTO_TCP,
140			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
141			.sport = 80,
142			.dport = 8080,
143		},
 
144	},
145	{
146		.name = "802.1q-ipv4",
147		.pkt.svlan_ipv4 = {
148			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
149			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
150			.iph.ihl = 5,
151			.iph.protocol = IPPROTO_TCP,
152			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
153			.tcp.doff = 5,
154			.tcp.source = 80,
155			.tcp.dest = 8080,
156		},
157		.keys = {
158			.nhoff = ETH_HLEN + VLAN_HLEN,
159			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
160			.addr_proto = ETH_P_IP,
161			.ip_proto = IPPROTO_TCP,
162			.n_proto = __bpf_constant_htons(ETH_P_IP),
163			.sport = 80,
164			.dport = 8080,
165		},
 
166	},
167	{
168		.name = "802.1ad-ipv6",
169		.pkt.dvlan_ipv6 = {
170			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
171			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
172			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
173			.iph.nexthdr = IPPROTO_TCP,
174			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
175			.tcp.doff = 5,
176			.tcp.source = 80,
177			.tcp.dest = 8080,
178		},
179		.keys = {
180			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
181			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
182				sizeof(struct ipv6hdr),
183			.addr_proto = ETH_P_IPV6,
184			.ip_proto = IPPROTO_TCP,
185			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
186			.sport = 80,
187			.dport = 8080,
188		},
 
189	},
190	{
191		.name = "ipv4-frag",
192		.pkt.ipv4 = {
193			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
194			.iph.ihl = 5,
195			.iph.protocol = IPPROTO_TCP,
196			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
197			.iph.frag_off = __bpf_constant_htons(IP_MF),
198			.tcp.doff = 5,
199			.tcp.source = 80,
200			.tcp.dest = 8080,
201		},
202		.keys = {
203			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
204			.nhoff = ETH_HLEN,
205			.thoff = ETH_HLEN + sizeof(struct iphdr),
206			.addr_proto = ETH_P_IP,
207			.ip_proto = IPPROTO_TCP,
208			.n_proto = __bpf_constant_htons(ETH_P_IP),
209			.is_frag = true,
210			.is_first_frag = true,
211			.sport = 80,
212			.dport = 8080,
213		},
214		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
 
215	},
216	{
217		.name = "ipv4-no-frag",
218		.pkt.ipv4 = {
219			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
220			.iph.ihl = 5,
221			.iph.protocol = IPPROTO_TCP,
222			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
223			.iph.frag_off = __bpf_constant_htons(IP_MF),
224			.tcp.doff = 5,
225			.tcp.source = 80,
226			.tcp.dest = 8080,
227		},
228		.keys = {
229			.nhoff = ETH_HLEN,
230			.thoff = ETH_HLEN + sizeof(struct iphdr),
231			.addr_proto = ETH_P_IP,
232			.ip_proto = IPPROTO_TCP,
233			.n_proto = __bpf_constant_htons(ETH_P_IP),
234			.is_frag = true,
235			.is_first_frag = true,
236		},
 
237	},
238	{
239		.name = "ipv6-frag",
240		.pkt.ipv6_frag = {
241			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
242			.iph.nexthdr = IPPROTO_FRAGMENT,
243			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
244			.ipf.nexthdr = IPPROTO_TCP,
245			.tcp.doff = 5,
246			.tcp.source = 80,
247			.tcp.dest = 8080,
248		},
249		.keys = {
250			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
251			.nhoff = ETH_HLEN,
252			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
253				sizeof(struct frag_hdr),
254			.addr_proto = ETH_P_IPV6,
255			.ip_proto = IPPROTO_TCP,
256			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
257			.is_frag = true,
258			.is_first_frag = true,
259			.sport = 80,
260			.dport = 8080,
261		},
262		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
 
263	},
264	{
265		.name = "ipv6-no-frag",
266		.pkt.ipv6_frag = {
267			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
268			.iph.nexthdr = IPPROTO_FRAGMENT,
269			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
270			.ipf.nexthdr = IPPROTO_TCP,
271			.tcp.doff = 5,
272			.tcp.source = 80,
273			.tcp.dest = 8080,
274		},
275		.keys = {
276			.nhoff = ETH_HLEN,
277			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
278				sizeof(struct frag_hdr),
279			.addr_proto = ETH_P_IPV6,
280			.ip_proto = IPPROTO_TCP,
281			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
282			.is_frag = true,
283			.is_first_frag = true,
284		},
 
285	},
286	{
287		.name = "ipv6-flow-label",
288		.pkt.ipv6 = {
289			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
290			.iph.nexthdr = IPPROTO_TCP,
291			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
292			.iph.flow_lbl = { 0xb, 0xee, 0xef },
293			.tcp.doff = 5,
294			.tcp.source = 80,
295			.tcp.dest = 8080,
296		},
297		.keys = {
298			.nhoff = ETH_HLEN,
299			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
300			.addr_proto = ETH_P_IPV6,
301			.ip_proto = IPPROTO_TCP,
302			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
303			.sport = 80,
304			.dport = 8080,
305			.flow_label = __bpf_constant_htonl(0xbeeef),
306		},
 
307	},
308	{
309		.name = "ipv6-no-flow-label",
310		.pkt.ipv6 = {
311			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
312			.iph.nexthdr = IPPROTO_TCP,
313			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
314			.iph.flow_lbl = { 0xb, 0xee, 0xef },
315			.tcp.doff = 5,
316			.tcp.source = 80,
317			.tcp.dest = 8080,
318		},
319		.keys = {
320			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
321			.nhoff = ETH_HLEN,
322			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
323			.addr_proto = ETH_P_IPV6,
324			.ip_proto = IPPROTO_TCP,
325			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
326			.flow_label = __bpf_constant_htonl(0xbeeef),
327		},
328		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
329	},
330	{
331		.name = "ipip-encap",
332		.pkt.ipip = {
333			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
334			.iph.ihl = 5,
335			.iph.protocol = IPPROTO_IPIP,
336			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
337			.iph_inner.ihl = 5,
338			.iph_inner.protocol = IPPROTO_TCP,
339			.iph_inner.tot_len =
340				__bpf_constant_htons(MAGIC_BYTES) -
341				sizeof(struct iphdr),
342			.tcp.doff = 5,
343			.tcp.source = 80,
344			.tcp.dest = 8080,
345		},
346		.keys = {
347			.nhoff = ETH_HLEN,
348			.thoff = ETH_HLEN + sizeof(struct iphdr) +
349				sizeof(struct iphdr),
350			.addr_proto = ETH_P_IP,
351			.ip_proto = IPPROTO_TCP,
352			.n_proto = __bpf_constant_htons(ETH_P_IP),
353			.is_encap = true,
354			.sport = 80,
355			.dport = 8080,
356		},
 
357	},
358	{
359		.name = "ipip-no-encap",
360		.pkt.ipip = {
361			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
362			.iph.ihl = 5,
363			.iph.protocol = IPPROTO_IPIP,
364			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
365			.iph_inner.ihl = 5,
366			.iph_inner.protocol = IPPROTO_TCP,
367			.iph_inner.tot_len =
368				__bpf_constant_htons(MAGIC_BYTES) -
369				sizeof(struct iphdr),
370			.tcp.doff = 5,
371			.tcp.source = 80,
372			.tcp.dest = 8080,
373		},
374		.keys = {
375			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
376			.nhoff = ETH_HLEN,
377			.thoff = ETH_HLEN + sizeof(struct iphdr),
378			.addr_proto = ETH_P_IP,
379			.ip_proto = IPPROTO_IPIP,
380			.n_proto = __bpf_constant_htons(ETH_P_IP),
381			.is_encap = true,
382		},
383		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
384	},
385};
386
387static int create_tap(const char *ifname)
388{
389	struct ifreq ifr = {
390		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
391	};
392	int fd, ret;
393
394	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
395
396	fd = open("/dev/net/tun", O_RDWR);
397	if (fd < 0)
398		return -1;
399
400	ret = ioctl(fd, TUNSETIFF, &ifr);
401	if (ret)
402		return -1;
403
404	return fd;
405}
406
/* Write one packet buffer to the tap fd using a one-entry writev(). */
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec one = { .iov_base = pkt, .iov_len = len };

	return writev(fd, &one, 1);
}
417
418static int ifup(const char *ifname)
419{
420	struct ifreq ifr = {};
421	int sk, ret;
422
423	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
424
425	sk = socket(PF_INET, SOCK_DGRAM, 0);
426	if (sk < 0)
427		return -1;
428
429	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
430	if (ret) {
431		close(sk);
432		return -1;
433	}
434
435	ifr.ifr_flags |= IFF_UP;
436	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
437	if (ret) {
438		close(sk);
439		return -1;
440	}
441
442	close(sk);
443	return 0;
444}
445
446void test_flow_dissector(void)
447{
448	int i, err, prog_fd, keys_fd = -1, tap_fd;
449	struct bpf_object *obj;
450	__u32 duration = 0;
451
452	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
453			    "jmp_table", "last_dissection", &prog_fd, &keys_fd);
454	if (CHECK_FAIL(err))
455		return;
456
457	for (i = 0; i < ARRAY_SIZE(tests); i++) {
458		struct bpf_flow_keys flow_keys;
459		struct bpf_prog_test_run_attr tattr = {
460			.prog_fd = prog_fd,
461			.data_in = &tests[i].pkt,
462			.data_size_in = sizeof(tests[i].pkt),
463			.data_out = &flow_keys,
464		};
465		static struct bpf_flow_keys ctx = {};
466
467		if (tests[i].flags) {
468			tattr.ctx_in = &ctx;
469			tattr.ctx_size_in = sizeof(ctx);
470			ctx.flags = tests[i].flags;
471		}
472
473		err = bpf_prog_test_run_xattr(&tattr);
474		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
475			   err || tattr.retval != 1,
476			   tests[i].name,
477			   "err %d errno %d retval %d duration %d size %u/%lu\n",
478			   err, errno, tattr.retval, tattr.duration,
479			   tattr.data_size_out, sizeof(flow_keys));
480		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
 
 
 
481	}
 
 
482
483	/* Do the same tests but for skb-less flow dissector.
484	 * We use a known path in the net/tun driver that calls
485	 * eth_get_headlen and we manually export bpf_flow_keys
486	 * via BPF map in this case.
487	 */
488
489	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
490	CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);
491
492	tap_fd = create_tap("tap0");
493	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
494	err = ifup("tap0");
495	CHECK(err, "ifup", "err %d errno %d\n", err, errno);
496
497	for (i = 0; i < ARRAY_SIZE(tests); i++) {
498		/* Keep in sync with 'flags' from eth_get_headlen. */
499		__u32 eth_get_headlen_flags =
500			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
501		struct bpf_prog_test_run_attr tattr = {};
502		struct bpf_flow_keys flow_keys = {};
503		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
504			    tests[i].keys.dport;
505
506		/* For skb-less case we can't pass input flags; run
507		 * only the tests that have a matching set of flags.
508		 */
509
510		if (tests[i].flags != eth_get_headlen_flags)
511			continue;
512
513		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
514		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
515
 
 
 
 
516		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
517		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
518
519		CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
520		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
521
522		err = bpf_map_delete_elem(keys_fd, &key);
523		CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
524	}
525
526	bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
527	bpf_object__close(obj);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
528}