Linux kernel selftest: tools/testing/selftests/bpf/prog_tests/flow_dissector.c
Two snapshots of this file follow for comparison (web-page navigation chrome removed).

Snapshot 1: v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2#include <test_progs.h>
  3#include <network_helpers.h>
  4#include <error.h>
  5#include <linux/if.h>
  6#include <linux/if_tun.h>
  7#include <sys/uio.h>
  8
  9#include "bpf_flow.skel.h"
 10
 11#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
 12
 13#ifndef IP_MF
 14#define IP_MF 0x2000
 15#endif
 16
/* Compare the dissected flow keys in @got against @expected with a raw
 * memcmp(); on mismatch, report every field as "got/expected" pairs via
 * _CHECK().  NOTE: relies on a `topts` variable being in scope at the
 * call site (it reads topts.duration for the report).
 */
#define CHECK_FLOW_KEYS(desc, got, expected)				\
	_CHECK(memcmp(&got, &expected, sizeof(got)) != 0,		\
	      desc,							\
	      topts.duration,						\
	      "nhoff=%u/%u "						\
	      "thoff=%u/%u "						\
	      "addr_proto=0x%x/0x%x "					\
	      "is_frag=%u/%u "						\
	      "is_first_frag=%u/%u "					\
	      "is_encap=%u/%u "						\
	      "ip_proto=0x%x/0x%x "					\
	      "n_proto=0x%x/0x%x "					\
	      "flow_label=0x%x/0x%x "					\
	      "sport=%u/%u "						\
	      "dport=%u/%u\n",						\
	      got.nhoff, expected.nhoff,				\
	      got.thoff, expected.thoff,				\
	      got.addr_proto, expected.addr_proto,			\
	      got.is_frag, expected.is_frag,				\
	      got.is_first_frag, expected.is_first_frag,		\
	      got.is_encap, expected.is_encap,				\
	      got.ip_proto, expected.ip_proto,				\
	      got.n_proto, expected.n_proto,				\
	      got.flow_label, expected.flow_label,			\
	      got.sport, expected.sport,				\
	      got.dport, expected.dport)
 43
/* On-the-wire packet layouts used as test inputs.  All are __packed so
 * that sizeof() equals the exact frame length written to the tap device
 * or passed to BPF_PROG_RUN. */

/* Plain Ethernet / IPv4 / TCP frame. */
struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

/* IPv4-in-IPv4 (IPIP) encapsulated TCP frame. */
struct ipip_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct iphdr iph_inner;
	struct tcphdr tcp;
} __packed;

/* Single 802.1Q VLAN tag, then IPv4/TCP. */
struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

/* Plain Ethernet / IPv6 / TCP frame. */
struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

/* IPv6 with a fragment extension header before TCP. */
struct ipv6_frag_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct frag_hdr {
		__u8 nexthdr;
		__u8 reserved;
		__be16 frag_off;
		__be32 identification;
	} ipf;
	struct tcphdr tcp;
} __packed;

/* Double-tagged (802.1ad outer + 802.1Q inner) IPv6/TCP frame. */
struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;
 92
/* One flow-dissector test case: the input packet, the flow keys the BPF
 * dissector is expected to produce, the input dissector flags, and the
 * expected program return value. */
struct test {
	const char *name;
	union {				/* input frame; one layout per case */
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipip_pkt ipip;
		struct ipv6_pkt ipv6;
		struct ipv6_frag_pkt ipv6_frag;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;	/* expected dissection result */
	__u32 flags;			/* BPF_FLOW_DISSECTOR_F_* input flags */
	__u32 retval;			/* expected BPF program return code */
};
107
#define VLAN_HLEN	4

/* Referenced by the CHECK() macro from test_progs.h. */
static __u32 duration;
/* Test table.  Every entry is run through BPF_PROG_RUN by
 * test_flow_dissector(); entries whose .flags match eth_get_headlen()'s
 * are additionally replayed through a tap device by run_tests_skb_less().
 */
struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	/* First IPv4 fragment: ports are only dissected because the
	 * F_PARSE_1ST_FRAG flag is passed in. */
	{
		.name = "ipv4-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
		.retval = BPF_OK,
	},
	/* Same fragment without the flag: no ports in the expected keys. */
	{
		.name = "ipv4-no-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipv6-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
		.retval = BPF_OK,
	},
	{
		.name = "ipv6-no-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipv6-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.retval = BPF_OK,
	},
	/* With STOP_AT_FLOW_LABEL and a non-zero label, dissection ends
	 * before the ports are extracted. */
	{
		.name = "ipv6-no-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
		.retval = BPF_OK,
	},
	/* STOP_AT_FLOW_LABEL with an all-zero label: dissection proceeds
	 * to the ports as usual. */
	{
		.name = "ipv6-empty-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0x00, 0x00, 0x00 },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
		.retval = BPF_OK,
	},
	{
		.name = "ipip-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr) +
				sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	{
		.name = "ipip-no-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_IPIP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
		.retval = BPF_OK,
	},
	/* Source address 127.0.0.127 (FLOW_CONTINUE_SADDR) makes the BPF
	 * program return BPF_FLOW_DISSECTOR_CONTINUE, so no keys are
	 * expected or checked for this case. */
	{
		.name = "ipip-encap-dissector-continue",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 99,
			.tcp.dest = 9090,
		},
		.retval = BPF_FLOW_DISSECTOR_CONTINUE,
	},
};
449
/* Create a TAP device named @ifname with NAPI frags enabled, so frames
 * written to the returned fd exercise the skb-less dissector path
 * (eth_get_headlen) inside the tun driver.
 *
 * Returns the open /dev/net/tun fd on success, -1 on failure.
 */
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	/* snprintf guarantees NUL termination; strncpy() could leave
	 * ifr_name unterminated for a name of IFNAMSIZ or more chars. */
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", ifname);

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);	/* was leaked on TUNSETIFF failure */
		return -1;
	}

	return fd;
}
469
/* Write @len bytes of @pkt to @fd in one writev() call; returns the
 * writev() result (bytes written, or -1 with errno set).
 */
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov = {
		.iov_base = pkt,
		.iov_len = len,
	};

	return writev(fd, &iov, 1);
}
480
/* Bring network interface @ifname administratively UP via the
 * SIOCGIFFLAGS/SIOCSIFFLAGS ioctls.  Returns 0 on success, -1 on any
 * failure; the control socket is closed on every path.
 */
static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int ret = -1;
	int sk;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	if (ioctl(sk, SIOCGIFFLAGS, &ifr))
		goto out;

	ifr.ifr_flags |= IFF_UP;
	if (ioctl(sk, SIOCSIFFLAGS, &ifr))
		goto out;

	ret = 0;
out:
	close(sk);
	return ret;
}
508
509static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
510{
511	int i, err, map_fd, prog_fd;
512	struct bpf_program *prog;
513	char prog_name[32];
514
515	map_fd = bpf_map__fd(prog_array);
516	if (map_fd < 0)
517		return -1;
518
519	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
520		snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
521
522		prog = bpf_object__find_program_by_name(obj, prog_name);
523		if (!prog)
524			return -1;
525
526		prog_fd = bpf_program__fd(prog);
527		if (prog_fd < 0)
528			return -1;
529
530		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
531		if (err)
532			return -1;
533	}
534	return 0;
535}
536
/* Replay the applicable test packets through the tap device and compare
 * the flow keys stored by the attached dissector in the @keys map
 * (indexed by sport<<16|dport) against the expected keys.  Only cases
 * whose flags match what eth_get_headlen() passes are run, since no
 * input flags can be supplied on this path.
 */
static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
{
	int i, err, keys_fd;

	keys_fd = bpf_map__fd(keys);
	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
		return;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		/* Keep in sync with 'flags' from eth_get_headlen. */
		__u32 eth_get_headlen_flags =
			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
		/* topts is required by CHECK_FLOW_KEYS (topts.duration). */
		LIBBPF_OPTS(bpf_test_run_opts, topts);
		struct bpf_flow_keys flow_keys = {};
		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
			    tests[i].keys.dport;

		/* For skb-less case we can't pass input flags; run
		 * only the tests that have a matching set of flags.
		 */

		if (tests[i].flags != eth_get_headlen_flags)
			continue;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

		/* check the stored flow_keys only if BPF_OK expected */
		if (tests[i].retval != BPF_OK)
			continue;

		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		ASSERT_OK(err, "bpf_map_lookup_elem");

		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);

		err = bpf_map_delete_elem(keys_fd, &key);
		ASSERT_OK(err, "bpf_map_delete_elem");
	}
}
577
/* Attach the dissector program directly with bpf_prog_attach(), run the
 * skb-less tests, then detach it again. */
static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
{
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
		return;

	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
		return;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}
595
/* Same as test_skb_less_prog_attach(), but attaches the dissector
 * indirectly via a netns bpf_link on the current network namespace. */
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
{
	struct bpf_link *link;
	int err, net_fd;

	net_fd = open("/proc/self/ns/net", O_RDONLY);
	if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
		return;

	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
	if (!ASSERT_OK_PTR(link, "attach_netns"))
		goto out_close;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	err = bpf_link__destroy(link);
	CHECK(err, "bpf_link__destroy", "err %d\n", err);
out_close:
	close(net_fd);
}
616
/* Test entry point: load the bpf_flow skeleton, run every case through
 * BPF_PROG_RUN, then replay the applicable cases through a tap device to
 * cover the skb-less (eth_get_headlen) dissector path.
 */
void test_flow_dissector(void)
{
	int i, err, prog_fd, keys_fd = -1, tap_fd;
	struct bpf_flow *skel;

	skel = bpf_flow__open_and_load();
	if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
		return;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
		goto out_destroy_skel;
	keys_fd = bpf_map__fd(skel->maps.last_dissection);
	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
		goto out_destroy_skel;
	err = init_prog_array(skel->obj, skel->maps.jmp_table);
	if (CHECK(err, "init_prog_array", "err %d\n", err))
		goto out_destroy_skel;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		LIBBPF_OPTS(bpf_test_run_opts, topts,
			.data_in = &tests[i].pkt,
			.data_size_in = sizeof(tests[i].pkt),
			.data_out = &flow_keys,
		);
		/* ctx carries the input dissector flags for this run. */
		static struct bpf_flow_keys ctx = {};

		if (tests[i].flags) {
			topts.ctx_in = &ctx;
			topts.ctx_size_in = sizeof(ctx);
			ctx.flags = tests[i].flags;
		}

		err = bpf_prog_test_run_opts(prog_fd, &topts);
		ASSERT_OK(err, "test_run");
		ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval");

		/* check the resulting flow_keys only if BPF_OK returned */
		if (topts.retval != BPF_OK)
			continue;
		ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
			  "test_run data_size_out");
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	/* Do the same tests but for skb-less flow dissector.
	 * We use a known path in the net/tun driver that calls
	 * eth_get_headlen and we manually export bpf_flow_keys
	 * via BPF map in this case.
	 */

	tap_fd = create_tap("tap0");
	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
	err = ifup("tap0");
	CHECK(err, "ifup", "err %d errno %d\n", err, errno);

	/* Test direct prog attachment */
	test_skb_less_prog_attach(skel, tap_fd);
	/* Test indirect prog attachment via link */
	test_skb_less_link_create(skel, tap_fd);

	close(tap_fd);
out_destroy_skel:
	bpf_flow__destroy(skel);
}
Snapshot 2: v5.9 (older version of the same selftest, shown for comparison)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <test_progs.h>
  3#include <network_helpers.h>
  4#include <error.h>
  5#include <linux/if.h>
  6#include <linux/if_tun.h>
  7#include <sys/uio.h>
  8
  9#include "bpf_flow.skel.h"
 10
 
 
 11#ifndef IP_MF
 12#define IP_MF 0x2000
 13#endif
 14
 15#define CHECK_FLOW_KEYS(desc, got, expected)				\
 16	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\
 17	      desc,							\
 
 18	      "nhoff=%u/%u "						\
 19	      "thoff=%u/%u "						\
 20	      "addr_proto=0x%x/0x%x "					\
 21	      "is_frag=%u/%u "						\
 22	      "is_first_frag=%u/%u "					\
 23	      "is_encap=%u/%u "						\
 24	      "ip_proto=0x%x/0x%x "					\
 25	      "n_proto=0x%x/0x%x "					\
 26	      "flow_label=0x%x/0x%x "					\
 27	      "sport=%u/%u "						\
 28	      "dport=%u/%u\n",						\
 29	      got.nhoff, expected.nhoff,				\
 30	      got.thoff, expected.thoff,				\
 31	      got.addr_proto, expected.addr_proto,			\
 32	      got.is_frag, expected.is_frag,				\
 33	      got.is_first_frag, expected.is_first_frag,		\
 34	      got.is_encap, expected.is_encap,				\
 35	      got.ip_proto, expected.ip_proto,				\
 36	      got.n_proto, expected.n_proto,				\
 37	      got.flow_label, expected.flow_label,			\
 38	      got.sport, expected.sport,				\
 39	      got.dport, expected.dport)
 40
 41struct ipv4_pkt {
 42	struct ethhdr eth;
 43	struct iphdr iph;
 44	struct tcphdr tcp;
 45} __packed;
 46
 47struct ipip_pkt {
 48	struct ethhdr eth;
 49	struct iphdr iph;
 50	struct iphdr iph_inner;
 51	struct tcphdr tcp;
 52} __packed;
 53
 54struct svlan_ipv4_pkt {
 55	struct ethhdr eth;
 56	__u16 vlan_tci;
 57	__u16 vlan_proto;
 58	struct iphdr iph;
 59	struct tcphdr tcp;
 60} __packed;
 61
 62struct ipv6_pkt {
 63	struct ethhdr eth;
 64	struct ipv6hdr iph;
 65	struct tcphdr tcp;
 66} __packed;
 67
 68struct ipv6_frag_pkt {
 69	struct ethhdr eth;
 70	struct ipv6hdr iph;
 71	struct frag_hdr {
 72		__u8 nexthdr;
 73		__u8 reserved;
 74		__be16 frag_off;
 75		__be32 identification;
 76	} ipf;
 77	struct tcphdr tcp;
 78} __packed;
 79
 80struct dvlan_ipv6_pkt {
 81	struct ethhdr eth;
 82	__u16 vlan_tci;
 83	__u16 vlan_proto;
 84	__u16 vlan_tci2;
 85	__u16 vlan_proto2;
 86	struct ipv6hdr iph;
 87	struct tcphdr tcp;
 88} __packed;
 89
 90struct test {
 91	const char *name;
 92	union {
 93		struct ipv4_pkt ipv4;
 94		struct svlan_ipv4_pkt svlan_ipv4;
 95		struct ipip_pkt ipip;
 96		struct ipv6_pkt ipv6;
 97		struct ipv6_frag_pkt ipv6_frag;
 98		struct dvlan_ipv6_pkt dvlan_ipv6;
 99	} pkt;
100	struct bpf_flow_keys keys;
101	__u32 flags;
 
102};
103
104#define VLAN_HLEN	4
105
106static __u32 duration;
107struct test tests[] = {
108	{
109		.name = "ipv4",
110		.pkt.ipv4 = {
111			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
112			.iph.ihl = 5,
113			.iph.protocol = IPPROTO_TCP,
114			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
115			.tcp.doff = 5,
116			.tcp.source = 80,
117			.tcp.dest = 8080,
118		},
119		.keys = {
120			.nhoff = ETH_HLEN,
121			.thoff = ETH_HLEN + sizeof(struct iphdr),
122			.addr_proto = ETH_P_IP,
123			.ip_proto = IPPROTO_TCP,
124			.n_proto = __bpf_constant_htons(ETH_P_IP),
125			.sport = 80,
126			.dport = 8080,
127		},
 
128	},
129	{
130		.name = "ipv6",
131		.pkt.ipv6 = {
132			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
133			.iph.nexthdr = IPPROTO_TCP,
134			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
135			.tcp.doff = 5,
136			.tcp.source = 80,
137			.tcp.dest = 8080,
138		},
139		.keys = {
140			.nhoff = ETH_HLEN,
141			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
142			.addr_proto = ETH_P_IPV6,
143			.ip_proto = IPPROTO_TCP,
144			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
145			.sport = 80,
146			.dport = 8080,
147		},
 
148	},
149	{
150		.name = "802.1q-ipv4",
151		.pkt.svlan_ipv4 = {
152			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
153			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
154			.iph.ihl = 5,
155			.iph.protocol = IPPROTO_TCP,
156			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
157			.tcp.doff = 5,
158			.tcp.source = 80,
159			.tcp.dest = 8080,
160		},
161		.keys = {
162			.nhoff = ETH_HLEN + VLAN_HLEN,
163			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
164			.addr_proto = ETH_P_IP,
165			.ip_proto = IPPROTO_TCP,
166			.n_proto = __bpf_constant_htons(ETH_P_IP),
167			.sport = 80,
168			.dport = 8080,
169		},
 
170	},
171	{
172		.name = "802.1ad-ipv6",
173		.pkt.dvlan_ipv6 = {
174			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
175			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
176			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
177			.iph.nexthdr = IPPROTO_TCP,
178			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
179			.tcp.doff = 5,
180			.tcp.source = 80,
181			.tcp.dest = 8080,
182		},
183		.keys = {
184			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
185			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
186				sizeof(struct ipv6hdr),
187			.addr_proto = ETH_P_IPV6,
188			.ip_proto = IPPROTO_TCP,
189			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
190			.sport = 80,
191			.dport = 8080,
192		},
 
193	},
194	{
195		.name = "ipv4-frag",
196		.pkt.ipv4 = {
197			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
198			.iph.ihl = 5,
199			.iph.protocol = IPPROTO_TCP,
200			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
201			.iph.frag_off = __bpf_constant_htons(IP_MF),
202			.tcp.doff = 5,
203			.tcp.source = 80,
204			.tcp.dest = 8080,
205		},
206		.keys = {
207			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
208			.nhoff = ETH_HLEN,
209			.thoff = ETH_HLEN + sizeof(struct iphdr),
210			.addr_proto = ETH_P_IP,
211			.ip_proto = IPPROTO_TCP,
212			.n_proto = __bpf_constant_htons(ETH_P_IP),
213			.is_frag = true,
214			.is_first_frag = true,
215			.sport = 80,
216			.dport = 8080,
217		},
218		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
 
219	},
220	{
221		.name = "ipv4-no-frag",
222		.pkt.ipv4 = {
223			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
224			.iph.ihl = 5,
225			.iph.protocol = IPPROTO_TCP,
226			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
227			.iph.frag_off = __bpf_constant_htons(IP_MF),
228			.tcp.doff = 5,
229			.tcp.source = 80,
230			.tcp.dest = 8080,
231		},
232		.keys = {
233			.nhoff = ETH_HLEN,
234			.thoff = ETH_HLEN + sizeof(struct iphdr),
235			.addr_proto = ETH_P_IP,
236			.ip_proto = IPPROTO_TCP,
237			.n_proto = __bpf_constant_htons(ETH_P_IP),
238			.is_frag = true,
239			.is_first_frag = true,
240		},
 
241	},
242	{
243		.name = "ipv6-frag",
244		.pkt.ipv6_frag = {
245			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
246			.iph.nexthdr = IPPROTO_FRAGMENT,
247			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
248			.ipf.nexthdr = IPPROTO_TCP,
249			.tcp.doff = 5,
250			.tcp.source = 80,
251			.tcp.dest = 8080,
252		},
253		.keys = {
254			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
255			.nhoff = ETH_HLEN,
256			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
257				sizeof(struct frag_hdr),
258			.addr_proto = ETH_P_IPV6,
259			.ip_proto = IPPROTO_TCP,
260			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
261			.is_frag = true,
262			.is_first_frag = true,
263			.sport = 80,
264			.dport = 8080,
265		},
266		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
 
267	},
268	{
269		.name = "ipv6-no-frag",
270		.pkt.ipv6_frag = {
271			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
272			.iph.nexthdr = IPPROTO_FRAGMENT,
273			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
274			.ipf.nexthdr = IPPROTO_TCP,
275			.tcp.doff = 5,
276			.tcp.source = 80,
277			.tcp.dest = 8080,
278		},
279		.keys = {
280			.nhoff = ETH_HLEN,
281			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
282				sizeof(struct frag_hdr),
283			.addr_proto = ETH_P_IPV6,
284			.ip_proto = IPPROTO_TCP,
285			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
286			.is_frag = true,
287			.is_first_frag = true,
288		},
 
289	},
290	{
291		.name = "ipv6-flow-label",
292		.pkt.ipv6 = {
293			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
294			.iph.nexthdr = IPPROTO_TCP,
295			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
296			.iph.flow_lbl = { 0xb, 0xee, 0xef },
297			.tcp.doff = 5,
298			.tcp.source = 80,
299			.tcp.dest = 8080,
300		},
301		.keys = {
302			.nhoff = ETH_HLEN,
303			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
304			.addr_proto = ETH_P_IPV6,
305			.ip_proto = IPPROTO_TCP,
306			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
307			.sport = 80,
308			.dport = 8080,
309			.flow_label = __bpf_constant_htonl(0xbeeef),
310		},
 
311	},
312	{
313		.name = "ipv6-no-flow-label",
314		.pkt.ipv6 = {
315			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
316			.iph.nexthdr = IPPROTO_TCP,
317			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
318			.iph.flow_lbl = { 0xb, 0xee, 0xef },
319			.tcp.doff = 5,
320			.tcp.source = 80,
321			.tcp.dest = 8080,
322		},
323		.keys = {
324			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
325			.nhoff = ETH_HLEN,
326			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
327			.addr_proto = ETH_P_IPV6,
328			.ip_proto = IPPROTO_TCP,
329			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
330			.flow_label = __bpf_constant_htonl(0xbeeef),
331		},
332		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
333	},
334	{
335		.name = "ipip-encap",
336		.pkt.ipip = {
337			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
338			.iph.ihl = 5,
339			.iph.protocol = IPPROTO_IPIP,
340			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
341			.iph_inner.ihl = 5,
342			.iph_inner.protocol = IPPROTO_TCP,
343			.iph_inner.tot_len =
344				__bpf_constant_htons(MAGIC_BYTES) -
345				sizeof(struct iphdr),
346			.tcp.doff = 5,
347			.tcp.source = 80,
348			.tcp.dest = 8080,
349		},
350		.keys = {
351			.nhoff = ETH_HLEN,
352			.thoff = ETH_HLEN + sizeof(struct iphdr) +
353				sizeof(struct iphdr),
354			.addr_proto = ETH_P_IP,
355			.ip_proto = IPPROTO_TCP,
356			.n_proto = __bpf_constant_htons(ETH_P_IP),
357			.is_encap = true,
358			.sport = 80,
359			.dport = 8080,
360		},
 
361	},
362	{
363		.name = "ipip-no-encap",
364		.pkt.ipip = {
365			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
366			.iph.ihl = 5,
367			.iph.protocol = IPPROTO_IPIP,
368			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
369			.iph_inner.ihl = 5,
370			.iph_inner.protocol = IPPROTO_TCP,
371			.iph_inner.tot_len =
372				__bpf_constant_htons(MAGIC_BYTES) -
373				sizeof(struct iphdr),
374			.tcp.doff = 5,
375			.tcp.source = 80,
376			.tcp.dest = 8080,
377		},
378		.keys = {
379			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
380			.nhoff = ETH_HLEN,
381			.thoff = ETH_HLEN + sizeof(struct iphdr),
382			.addr_proto = ETH_P_IP,
383			.ip_proto = IPPROTO_IPIP,
384			.n_proto = __bpf_constant_htons(ETH_P_IP),
385			.is_encap = true,
386		},
387		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
388	},
389};
390
/* Create a TAP device named @ifname with NAPI frags enabled, so frames
 * written to the returned fd exercise the skb-less dissector path
 * (eth_get_headlen) inside the tun driver.
 *
 * Returns the open /dev/net/tun fd on success, -1 on failure.
 */
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	/* snprintf guarantees NUL termination; strncpy() could leave
	 * ifr_name unterminated for a name of IFNAMSIZ or more chars. */
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", ifname);

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);	/* was leaked on TUNSETIFF failure */
		return -1;
	}

	return fd;
}
410
/* Write @len bytes of @pkt to the tap @fd in a single writev() call;
 * returns the writev() result. */
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};
	return writev(fd, iov, ARRAY_SIZE(iov));
}
421
/* Bring network interface @ifname administratively UP via the
 * SIOCGIFFLAGS/SIOCSIFFLAGS ioctls.  Returns 0 on success, -1 on any
 * failure; the control socket is closed on every path. */
static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}
449
/* Populate the tail-call map @prog_array from @obj: slot i receives the
 * fd of the program titled "flow_dissector/%i".  Returns 0 on success,
 * -1 on any failure.
 * NOTE: bpf_map__def() and bpf_object__find_program_by_title() were
 * later replaced (see the v6.8 version, which uses
 * bpf_map__max_entries() and bpf_object__find_program_by_name()). */
static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
{
	int i, err, map_fd, prog_fd;
	struct bpf_program *prog;
	char prog_name[32];

	map_fd = bpf_map__fd(prog_array);
	if (map_fd < 0)
		return -1;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "flow_dissector/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (!prog)
			return -1;

		prog_fd = bpf_program__fd(prog);
		if (prog_fd < 0)
			return -1;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (err)
			return -1;
	}
	return 0;
}
477
478static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
479{
480	int i, err, keys_fd;
481
482	keys_fd = bpf_map__fd(keys);
483	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
484		return;
485
486	for (i = 0; i < ARRAY_SIZE(tests); i++) {
487		/* Keep in sync with 'flags' from eth_get_headlen. */
488		__u32 eth_get_headlen_flags =
489			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
490		struct bpf_prog_test_run_attr tattr = {};
491		struct bpf_flow_keys flow_keys = {};
492		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
493			    tests[i].keys.dport;
494
495		/* For skb-less case we can't pass input flags; run
496		 * only the tests that have a matching set of flags.
497		 */
498
499		if (tests[i].flags != eth_get_headlen_flags)
500			continue;
501
502		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
503		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
504
 
 
 
 
505		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
506		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
507
508		CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
509		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
510
511		err = bpf_map_delete_elem(keys_fd, &key);
512		CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
513	}
514}
515
/* Exercise the skb-less dissector path with the program attached
 * directly via bpf_prog_attach(BPF_FLOW_DISSECTOR), then detach it.
 * Attaching to target fd 0 means the current network namespace.
 */
static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
{
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
		return;

	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
		return;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	/* Detach even on test failure so later attach attempts succeed. */
	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}
533
/* Exercise the skb-less dissector path with the program attached
 * indirectly, via a bpf_link bound to the current network namespace.
 */
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
{
	struct bpf_link *link;
	int err, net_fd;

	/* The netns attach point is identified by an open netns fd. */
	net_fd = open("/proc/self/ns/net", O_RDONLY);
	if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
		return;

	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
	if (CHECK(IS_ERR(link), "attach_netns", "err %ld\n", PTR_ERR(link)))
		goto out_close;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	/* Destroying the link also detaches the program. */
	err = bpf_link__destroy(link);
	CHECK(err, "bpf_link__destroy", "err %d\n", err);
out_close:
	close(net_fd);
}
554
555void test_flow_dissector(void)
556{
557	int i, err, prog_fd, keys_fd = -1, tap_fd;
558	struct bpf_flow *skel;
559
560	skel = bpf_flow__open_and_load();
561	if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
562		return;
563
564	prog_fd = bpf_program__fd(skel->progs._dissect);
565	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
566		goto out_destroy_skel;
567	keys_fd = bpf_map__fd(skel->maps.last_dissection);
568	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
569		goto out_destroy_skel;
570	err = init_prog_array(skel->obj, skel->maps.jmp_table);
571	if (CHECK(err, "init_prog_array", "err %d\n", err))
572		goto out_destroy_skel;
573
574	for (i = 0; i < ARRAY_SIZE(tests); i++) {
575		struct bpf_flow_keys flow_keys;
576		struct bpf_prog_test_run_attr tattr = {
577			.prog_fd = prog_fd,
578			.data_in = &tests[i].pkt,
579			.data_size_in = sizeof(tests[i].pkt),
580			.data_out = &flow_keys,
581		};
582		static struct bpf_flow_keys ctx = {};
583
584		if (tests[i].flags) {
585			tattr.ctx_in = &ctx;
586			tattr.ctx_size_in = sizeof(ctx);
587			ctx.flags = tests[i].flags;
588		}
589
590		err = bpf_prog_test_run_xattr(&tattr);
591		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
592			   err || tattr.retval != 1,
593			   tests[i].name,
594			   "err %d errno %d retval %d duration %d size %u/%zu\n",
595			   err, errno, tattr.retval, tattr.duration,
596			   tattr.data_size_out, sizeof(flow_keys));
 
 
597		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
598	}
599
600	/* Do the same tests but for skb-less flow dissector.
601	 * We use a known path in the net/tun driver that calls
602	 * eth_get_headlen and we manually export bpf_flow_keys
603	 * via BPF map in this case.
604	 */
605
606	tap_fd = create_tap("tap0");
607	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
608	err = ifup("tap0");
609	CHECK(err, "ifup", "err %d errno %d\n", err, errno);
610
611	/* Test direct prog attachment */
612	test_skb_less_prog_attach(skel, tap_fd);
613	/* Test indirect prog attachment via link */
614	test_skb_less_link_create(skel, tap_fd);
615
616	close(tap_fd);
617out_destroy_skel:
618	bpf_flow__destroy(skel);
619}