/* tools/testing/selftests/bpf/prog_tests/sockmap_basic.c — as of v6.9.4 */
  1// SPDX-License-Identifier: GPL-2.0
  2// Copyright (c) 2020 Cloudflare
  3#include <error.h>
  4#include <netinet/tcp.h>
  5#include <sys/epoll.h>
  6
  7#include "test_progs.h"
  8#include "test_skmsg_load_helpers.skel.h"
  9#include "test_sockmap_update.skel.h"
 10#include "test_sockmap_invalid_update.skel.h"
 11#include "test_sockmap_skb_verdict_attach.skel.h"
 12#include "test_sockmap_progs_query.skel.h"
 13#include "test_sockmap_pass_prog.skel.h"
 14#include "test_sockmap_drop_prog.skel.h"
 15#include "bpf_iter_sockmap.skel.h"
 16
 17#include "sockmap_helpers.h"
 18
 19#define TCP_REPAIR		19	/* TCP sock is under repair right now */
 20
 21#define TCP_REPAIR_ON		1
 22#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */
 23
 24static int connected_socket_v4(void)
 25{
 26	struct sockaddr_in addr = {
 27		.sin_family = AF_INET,
 28		.sin_port = htons(80),
 29		.sin_addr = { inet_addr("127.0.0.1") },
 30	};
 31	socklen_t len = sizeof(addr);
 32	int s, repair, err;
 33
 34	s = socket(AF_INET, SOCK_STREAM, 0);
 35	if (!ASSERT_GE(s, 0, "socket"))
 36		goto error;
 37
 38	repair = TCP_REPAIR_ON;
 39	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
 40	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
 41		goto error;
 42
 43	err = connect(s, (struct sockaddr *)&addr, len);
 44	if (!ASSERT_OK(err, "connect"))
 45		goto error;
 46
 47	repair = TCP_REPAIR_OFF_NO_WP;
 48	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
 49	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
 50		goto error;
 51
 52	return s;
 53error:
 54	perror(__func__);
 55	close(s);
 56	return -1;
 57}
 58
 59static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
 60{
 61	__u32 i, max_entries = bpf_map__max_entries(src);
 62	int err, src_fd, dst_fd;
 63
 64	src_fd = bpf_map__fd(src);
 65	dst_fd = bpf_map__fd(dst);
 66
 67	for (i = 0; i < max_entries; i++) {
 68		__u64 src_cookie, dst_cookie;
 69
 70		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
 71		if (err && errno == ENOENT) {
 72			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
 73			ASSERT_ERR(err, "map_lookup_elem(dst)");
 74			ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
 
 75			continue;
 76		}
 77		if (!ASSERT_OK(err, "lookup_elem(src)"))
 78			continue;
 79
 80		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
 81		if (!ASSERT_OK(err, "lookup_elem(dst)"))
 82			continue;
 83
 84		ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
 
 85	}
 86}
 87
 88/* Create a map, populate it with one socket, and free the map. */
 89static void test_sockmap_create_update_free(enum bpf_map_type map_type)
 90{
 91	const int zero = 0;
 92	int s, map, err;
 93
 94	s = connected_socket_v4();
 95	if (!ASSERT_GE(s, 0, "connected_socket_v4"))
 96		return;
 97
 98	map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
 99	if (!ASSERT_GE(map, 0, "bpf_map_create"))
 
100		goto out;
 
101
102	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
103	if (!ASSERT_OK(err, "bpf_map_update"))
 
104		goto out;
 
105
106out:
107	close(map);
108	close(s);
109}
110
111static void test_skmsg_helpers(enum bpf_map_type map_type)
112{
113	struct test_skmsg_load_helpers *skel;
114	int err, map, verdict;
115
116	skel = test_skmsg_load_helpers__open_and_load();
117	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
 
118		return;
 
119
120	verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
121	map = bpf_map__fd(skel->maps.sock_map);
122
123	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
124	if (!ASSERT_OK(err, "bpf_prog_attach"))
 
125		goto out;
 
126
127	err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
128	if (!ASSERT_OK(err, "bpf_prog_detach2"))
 
129		goto out;
 
130out:
131	test_skmsg_load_helpers__destroy(skel);
132}
133
/* Run the copy_sock_map program via BPF_PROG_TEST_RUN and verify that it
 * copied the socket from the src map into the requested destination map
 * type (sockmap or sockhash), comparing socket cookies.
 */
static void test_sockmap_update(enum bpf_map_type map_type)
{
	int err, prog, src;
	struct test_sockmap_update *skel;
	struct bpf_map *dst_map;
	const __u32 zero = 0;
	/* Minimal dummy packet; test_run requires non-empty data_in. */
	char dummy[14] = {0};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = dummy,
		.data_size_in = sizeof(dummy),
		.repeat = 1,
	);
	/* __s64 so the fd matches the 64-bit map value layout. */
	__s64 sk;

	sk = connected_socket_v4();
	if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
		return;

	skel = test_sockmap_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto close_sk;

	prog = bpf_program__fd(skel->progs.copy_sock_map);
	src = bpf_map__fd(skel->maps.src);
	if (map_type == BPF_MAP_TYPE_SOCKMAP)
		dst_map = skel->maps.dst_sock_map;
	else
		dst_map = skel->maps.dst_sock_hash;

	err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
	if (!ASSERT_OK(err, "update_elem(src)"))
		goto out;

	err = bpf_prog_test_run_opts(prog, &topts);
	if (!ASSERT_OK(err, "test_run"))
		goto out;
	/* The program returns non-zero on a successful copy. */
	if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
		goto out;

	compare_cookies(skel->maps.src, dst_map);

out:
	test_sockmap_update__destroy(skel);
close_sk:
	close(sk);
}
180
/* The program updates a sockmap from an unsafe context; the verifier must
 * reject it, so open_and_load is expected to fail (return NULL).
 */
static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;

	skel = test_sockmap_invalid_update__open_and_load();
	if (ASSERT_NULL(skel, "open_and_load"))
		return;
	/* Verifier wrongly accepted the program; free the skeleton. */
	test_sockmap_invalid_update__destroy(skel);
}
189
/* Iterate a sockmap/sockhash with a BPF iterator that copies each socket
 * into the dst map, then check element/socket counts and compare cookies.
 */
static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	/* For the sockmap, leave one slot empty (iterator still visits it);
	 * for the sockhash, empty slots are simply absent from iteration.
	 */
	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
		goto out;

	/* Pre-mark all fds invalid so the cleanup loop is safe on any path. */
	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* Drain the iterator; each read drives the BPF program. */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	if (sock_fd)
		free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}
273
274static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
275					    enum bpf_attach_type second)
276{
277	struct test_sockmap_skb_verdict_attach *skel;
278	int err, map, verdict;
279
280	skel = test_sockmap_skb_verdict_attach__open_and_load();
281	if (!ASSERT_OK_PTR(skel, "open_and_load"))
 
282		return;
 
283
284	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
285	map = bpf_map__fd(skel->maps.sock_map);
286
287	err = bpf_prog_attach(verdict, map, first, 0);
288	if (!ASSERT_OK(err, "bpf_prog_attach"))
 
289		goto out;
 
290
291	err = bpf_prog_attach(verdict, map, second, 0);
292	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");
293
294	err = bpf_prog_detach2(verdict, map, first);
295	if (!ASSERT_OK(err, "bpf_prog_detach2"))
296		goto out;
297out:
298	test_sockmap_skb_verdict_attach__destroy(skel);
299}
300
301static __u32 query_prog_id(int prog_fd)
302{
303	struct bpf_prog_info info = {};
304	__u32 info_len = sizeof(info);
305	int err;
306
307	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
308	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
309	    !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
310		return 0;
311
312	return info.id;
313}
314
/* Check BPF_PROG_QUERY on a sockmap: zero programs reported before attach,
 * and exactly one program (with the right ID) reported after attach.
 */
static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
{
	struct test_sockmap_progs_query *skel;
	int err, map_fd, verdict_fd;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;

	skel = test_sockmap_progs_query__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sock_map);

	/* Both skb flavors use the same skb verdict program. */
	if (attach_type == BPF_SK_MSG_VERDICT)
		verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
	else
		verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);

	/* Nothing attached yet: the query must report zero programs. */
	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags,  0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 0, "wrong program count on query");

	err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach failed"))
		goto out;

	/* After attach: exactly one program, matching verdict_fd's ID. */
	prog_cnt = 1;
	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
	ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
		  "wrong prog_ids on query");

	bpf_prog_detach2(verdict_fd, map_fd, attach_type);
out:
	test_sockmap_progs_query__destroy(skel);
}
357
358#define MAX_EVENTS 10
359static void test_sockmap_skb_verdict_shutdown(void)
360{
361	struct epoll_event ev, events[MAX_EVENTS];
362	int n, err, map, verdict, s, c1 = -1, p1 = -1;
363	struct test_sockmap_pass_prog *skel;
364	int epollfd;
365	int zero = 0;
366	char b;
367
368	skel = test_sockmap_pass_prog__open_and_load();
369	if (!ASSERT_OK_PTR(skel, "open_and_load"))
370		return;
371
372	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
373	map = bpf_map__fd(skel->maps.sock_map_rx);
374
375	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
376	if (!ASSERT_OK(err, "bpf_prog_attach"))
377		goto out;
378
379	s = socket_loopback(AF_INET, SOCK_STREAM);
380	if (s < 0)
381		goto out;
382	err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
383	if (err < 0)
384		goto out;
385
386	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
387	if (err < 0)
388		goto out_close;
389
390	shutdown(p1, SHUT_WR);
391
392	ev.events = EPOLLIN;
393	ev.data.fd = c1;
394
395	epollfd = epoll_create1(0);
396	if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
397		goto out_close;
398	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
399	if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
400		goto out_close;
401	err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
402	if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
403		goto out_close;
404
405	n = recv(c1, &b, 1, SOCK_NONBLOCK);
406	ASSERT_EQ(n, 0, "recv_timeout(fin)");
407out_close:
408	close(c1);
409	close(p1);
410out:
411	test_sockmap_pass_prog__destroy(skel);
412}
413
414static void test_sockmap_skb_verdict_fionread(bool pass_prog)
415{
416	int expected, zero = 0, sent, recvd, avail;
417	int err, map, verdict, s, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
418	struct test_sockmap_pass_prog *pass = NULL;
419	struct test_sockmap_drop_prog *drop = NULL;
420	char buf[256] = "0123456789";
421
422	if (pass_prog) {
423		pass = test_sockmap_pass_prog__open_and_load();
424		if (!ASSERT_OK_PTR(pass, "open_and_load"))
425			return;
426		verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
427		map = bpf_map__fd(pass->maps.sock_map_rx);
428		expected = sizeof(buf);
429	} else {
430		drop = test_sockmap_drop_prog__open_and_load();
431		if (!ASSERT_OK_PTR(drop, "open_and_load"))
432			return;
433		verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
434		map = bpf_map__fd(drop->maps.sock_map_rx);
435		/* On drop data is consumed immediately and copied_seq inc'd */
436		expected = 0;
437	}
438
439
440	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
441	if (!ASSERT_OK(err, "bpf_prog_attach"))
442		goto out;
443
444	s = socket_loopback(AF_INET, SOCK_STREAM);
445	if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
446		goto out;
447	err = create_socket_pairs(s, AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
448	if (!ASSERT_OK(err, "create_socket_pairs(s)"))
449		goto out;
450
451	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
452	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
453		goto out_close;
454
455	sent = xsend(p1, &buf, sizeof(buf), 0);
456	ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
457	err = ioctl(c1, FIONREAD, &avail);
458	ASSERT_OK(err, "ioctl(FIONREAD) error");
459	ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
460	/* On DROP test there will be no data to read */
461	if (pass_prog) {
462		recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
463		ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
464	}
465
466out_close:
467	close(c0);
468	close(p0);
469	close(c1);
470	close(p1);
471out:
472	if (pass_prog)
473		test_sockmap_pass_prog__destroy(pass);
474	else
475		test_sockmap_drop_prog__destroy(drop);
476}
477
478static void test_sockmap_skb_verdict_peek(void)
479{
480	int err, map, verdict, s, c1, p1, zero = 0, sent, recvd, avail;
481	struct test_sockmap_pass_prog *pass;
482	char snd[256] = "0123456789";
483	char rcv[256] = "0";
484
485	pass = test_sockmap_pass_prog__open_and_load();
486	if (!ASSERT_OK_PTR(pass, "open_and_load"))
487		return;
488	verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
489	map = bpf_map__fd(pass->maps.sock_map_rx);
490
491	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
492	if (!ASSERT_OK(err, "bpf_prog_attach"))
493		goto out;
494
495	s = socket_loopback(AF_INET, SOCK_STREAM);
496	if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
497		goto out;
498
499	err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
500	if (!ASSERT_OK(err, "create_pairs(s)"))
501		goto out;
502
503	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
504	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
505		goto out_close;
506
507	sent = xsend(p1, snd, sizeof(snd), 0);
508	ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
509	recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
510	ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
511	err = ioctl(c1, FIONREAD, &avail);
512	ASSERT_OK(err, "ioctl(FIONREAD) error");
513	ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
514	recvd = recv(c1, rcv, sizeof(rcv), 0);
515	ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)");
516	err = ioctl(c1, FIONREAD, &avail);
517	ASSERT_OK(err, "ioctl(FIONREAD) error");
518	ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");
519
520out_close:
521	close(c1);
522	close(p1);
523out:
524	test_sockmap_pass_prog__destroy(pass);
525}
526
527static void test_sockmap_unconnected_unix(void)
528{
529	int err, map, stream = 0, dgram = 0, zero = 0;
530	struct test_sockmap_pass_prog *skel;
531
532	skel = test_sockmap_pass_prog__open_and_load();
533	if (!ASSERT_OK_PTR(skel, "open_and_load"))
534		return;
535
536	map = bpf_map__fd(skel->maps.sock_map_rx);
537
538	stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
539	if (stream < 0)
540		return;
541
542	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
543	if (dgram < 0) {
544		close(stream);
545		return;
546	}
547
548	err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
549	ASSERT_ERR(err, "bpf_map_update_elem(stream)");
550
551	err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
552	ASSERT_OK(err, "bpf_map_update_elem(dgram)");
553
554	close(stream);
555	close(dgram);
556}
557
/* Insert the same set of sockets (unix stream/dgram, udp, tcp) into many
 * entries of one sockmap, then delete every entry again.
 */
static void test_sockmap_many_socket(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	/* Each socket type lands in two consecutive map entries. */
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	/* Walk back down and delete every populated entry. */
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map, &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}
625
/* Insert the same sockets into two different sockmaps (rx and tx), then
 * delete all entries from both maps.
 */
static void test_sockmap_many_maps(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map[2], entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map[0] = bpf_map__fd(skel->maps.sock_map_rx);
	map[1] = bpf_map__fd(skel->maps.sock_map_tx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	/* Updates alternate maps via map[i]: even entries land in map[0],
	 * odd entries in map[1].
	 */
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	/* Mirror the interleaving on deletion: entry is decremented twice
	 * per iteration (once in the body, once by the loop), so odd
	 * entries are deleted from map[1] and even entries from map[0].
	 */
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map[1], &entry);
		entry--;
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
		err = bpf_map_delete_elem(map[0], &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}
697
/* Repeatedly replace the same sockmap slot with sockets of different
 * types (BPF_ANY overwrites), then delete the slot once.
 */
static void test_sockmap_same_sock(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, zero = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	/* All updates target slot 0; each one replaces the previous sock. */
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}

	err = bpf_map_delete_elem(map, &zero);
	ASSERT_OK(err, "bpf_map_delete_elem(entry)");

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}
764
/* Test-suite entry point: dispatch each named subtest when selected by
 * the test_progs runner.
 */
void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	/* Exercise both attach orderings of the two skb verdict flavors. */
	if (test__start_subtest("sockmap skb_verdict attach")) {
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
	if (test__start_subtest("sockmap msg_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
	if (test__start_subtest("sockmap stream_parser progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
	if (test__start_subtest("sockmap stream_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
	if (test__start_subtest("sockmap skb_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
	if (test__start_subtest("sockmap skb_verdict shutdown"))
		test_sockmap_skb_verdict_shutdown();
	if (test__start_subtest("sockmap skb_verdict fionread"))
		test_sockmap_skb_verdict_fionread(true);
	if (test__start_subtest("sockmap skb_verdict fionread on drop"))
		test_sockmap_skb_verdict_fionread(false);
	if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
		test_sockmap_skb_verdict_peek();
	if (test__start_subtest("sockmap unconnected af_unix"))
		test_sockmap_unconnected_unix();
	if (test__start_subtest("sockmap one socket to many map entries"))
		test_sockmap_many_socket();
	if (test__start_subtest("sockmap one socket to many maps"))
		test_sockmap_many_maps();
	if (test__start_subtest("sockmap same socket replace"))
		test_sockmap_same_sock();
}
/* Older revision of the same test file (v5.14.15), kept below for comparison. */
  1// SPDX-License-Identifier: GPL-2.0
  2// Copyright (c) 2020 Cloudflare
  3#include <error.h>
  4#include <netinet/tcp.h>
 
  5
  6#include "test_progs.h"
  7#include "test_skmsg_load_helpers.skel.h"
  8#include "test_sockmap_update.skel.h"
  9#include "test_sockmap_invalid_update.skel.h"
 10#include "test_sockmap_skb_verdict_attach.skel.h"
 
 
 
 11#include "bpf_iter_sockmap.skel.h"
 12
 
 
 13#define TCP_REPAIR		19	/* TCP sock is under repair right now */
 14
 15#define TCP_REPAIR_ON		1
 16#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */
 17
/* Legacy v5.14 copy: create a "connected" TCP socket via TCP repair mode
 * (no packets sent); uses CHECK_FAIL instead of the newer ASSERT_* API.
 */
static int connected_socket_v4(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(80),
		.sin_addr = { inet_addr("127.0.0.1") },
	};
	socklen_t len = sizeof(addr);
	int s, repair, err;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (CHECK_FAIL(s == -1))
		goto error;

	/* In repair mode connect() only records state, no SYN is sent. */
	repair = TCP_REPAIR_ON;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (CHECK_FAIL(err))
		goto error;

	err = connect(s, (struct sockaddr *)&addr, len);
	if (CHECK_FAIL(err))
		goto error;

	repair = TCP_REPAIR_OFF_NO_WP;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (CHECK_FAIL(err))
		goto error;

	return s;
error:
	perror(__func__);
	/* NOTE(review): close(s) runs even when socket() failed (s == -1);
	 * harmless EBADF, fixed in later kernels.
	 */
	close(s);
	return -1;
}
 52
/* Legacy v5.14 copy: check dst mirrors src slot-for-slot by socket
 * cookie; empty src slots must be empty in dst as well.
 */
static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
	__u32 i, max_entries = bpf_map__max_entries(src);
	/* duration is required by the CHECK() macro. */
	int err, duration = 0, src_fd, dst_fd;

	src_fd = bpf_map__fd(src);
	dst_fd = bpf_map__fd(dst);

	for (i = 0; i < max_entries; i++) {
		__u64 src_cookie, dst_cookie;

		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
		if (err && errno == ENOENT) {
			/* Slot empty in src: dst must miss it too. */
			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
			CHECK(!err, "map_lookup_elem(dst)", "element %u not deleted\n", i);
			CHECK(err && errno != ENOENT, "map_lookup_elem(dst)", "%s\n",
			      strerror(errno));
			continue;
		}
		if (CHECK(err, "lookup_elem(src)", "%s\n", strerror(errno)))
			continue;

		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
		if (CHECK(err, "lookup_elem(dst)", "%s\n", strerror(errno)))
			continue;

		CHECK(dst_cookie != src_cookie, "cookie mismatch",
		      "%llu != %llu (pos %u)\n", dst_cookie, src_cookie, i);
	}
}
 83
/* Create a map, populate it with one socket, and free the map.
 * Legacy v5.14 copy; uses the deprecated bpf_create_map() API.
 */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
	const int zero = 0;
	int s, map, err;

	s = connected_socket_v4();
	if (CHECK_FAIL(s < 0))
		return;

	map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
	if (CHECK_FAIL(map < 0)) {
		perror("bpf_create_map");
		goto out;
	}

	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
	if (CHECK_FAIL(err)) {
		perror("bpf_map_update");
		goto out;
	}

out:
	/* NOTE(review): on create failure this close()s a negative map fd;
	 * harmless EBADF, tightened in later kernels.
	 */
	close(map);
	close(s);
}
110
/* Legacy v5.14 copy: attach and detach a msg_verdict program on the
 * sockmap to prove sk_msg helper programs load and attach cleanly.
 */
static void test_skmsg_helpers(enum bpf_map_type map_type)
{
	struct test_skmsg_load_helpers *skel;
	int err, map, verdict;

	skel = test_skmsg_load_helpers__open_and_load();
	if (CHECK_FAIL(!skel)) {
		perror("test_skmsg_load_helpers__open_and_load");
		return;
	}

	verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
	if (CHECK_FAIL(err)) {
		perror("bpf_prog_attach");
		goto out;
	}

	err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
	if (CHECK_FAIL(err)) {
		perror("bpf_prog_detach2");
		goto out;
	}
out:
	test_skmsg_load_helpers__destroy(skel);
}
139
/* Legacy v5.14 copy: run copy_sock_map via the deprecated
 * bpf_prog_test_run_xattr() API and compare src/dst socket cookies.
 */
static void test_sockmap_update(enum bpf_map_type map_type)
{
	struct bpf_prog_test_run_attr tattr;
	int err, prog, src, duration = 0;
	struct test_sockmap_update *skel;
	struct bpf_map *dst_map;
	const __u32 zero = 0;
	/* Minimal dummy packet; test_run requires non-empty data_in. */
	char dummy[14] = {0};
	/* __s64 so the fd matches the 64-bit map value layout. */
	__s64 sk;

	sk = connected_socket_v4();
	if (CHECK(sk == -1, "connected_socket_v4", "cannot connect\n"))
		return;

	skel = test_sockmap_update__open_and_load();
	if (CHECK(!skel, "open_and_load", "cannot load skeleton\n"))
		goto close_sk;

	prog = bpf_program__fd(skel->progs.copy_sock_map);
	src = bpf_map__fd(skel->maps.src);
	if (map_type == BPF_MAP_TYPE_SOCKMAP)
		dst_map = skel->maps.dst_sock_map;
	else
		dst_map = skel->maps.dst_sock_hash;

	err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
	if (CHECK(err, "update_elem(src)", "errno=%u\n", errno))
		goto out;

	tattr = (struct bpf_prog_test_run_attr){
		.prog_fd = prog,
		.repeat = 1,
		.data_in = dummy,
		.data_size_in = sizeof(dummy),
	};

	/* Non-zero retval signals a successful copy by the program. */
	err = bpf_prog_test_run_xattr(&tattr);
	if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run",
		       "errno=%u retval=%u\n", errno, tattr.retval))
		goto out;

	compare_cookies(skel->maps.src, dst_map);

out:
	test_sockmap_update__destroy(skel);
close_sk:
	close(sk);
}
188
/* Legacy v5.14 copy: the verifier must reject a sockmap update from an
 * unsafe context, so open_and_load is expected to fail (return NULL).
 */
static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;
	int duration = 0;

	skel = test_sockmap_invalid_update__open_and_load();
	/* If the verifier wrongly accepted it, clean up the skeleton. */
	if (CHECK(skel, "open_and_load", "verifier accepted map_update\n"))
		test_sockmap_invalid_update__destroy(skel);
}
198
/* Legacy v5.14 copy: iterate a sockmap/sockhash with a BPF iterator that
 * copies each socket into dst, then check counts and compare cookies.
 */
static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd, duration = 0;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (CHECK(!skel, "bpf_iter_sockmap__open_and_load", "skeleton open_and_load failed\n"))
		return;

	/* For the sockmap, leave one slot empty (iterator still visits it);
	 * for the sockhash, empty slots are simply absent from iteration.
	 */
	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (CHECK(!sock_fd, "calloc(sock_fd)", "failed to allocate\n"))
		goto out;

	/* Pre-mark all fds invalid so the cleanup loop is safe on any path. */
	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (CHECK(sock_fd[i] == -1, "connected_socket_v4", "cannot connect\n"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (CHECK(err, "map_update", "failed: %s\n", strerror(errno)))
			goto out;
	}

	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* Drain the iterator; each read drives the BPF program. */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->elems != num_elems, "elems", "got %u expected %u\n",
		  skel->bss->elems, num_elems))
		goto close_iter;

	if (CHECK(skel->bss->socks != num_sockets, "socks", "got %u expected %u\n",
		  skel->bss->socks, num_sockets))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	if (sock_fd)
		free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}
284
285static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
286					    enum bpf_attach_type second)
287{
288	struct test_sockmap_skb_verdict_attach *skel;
289	int err, map, verdict;
290
291	skel = test_sockmap_skb_verdict_attach__open_and_load();
292	if (CHECK_FAIL(!skel)) {
293		perror("test_sockmap_skb_verdict_attach__open_and_load");
294		return;
295	}
296
297	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
298	map = bpf_map__fd(skel->maps.sock_map);
299
300	err = bpf_prog_attach(verdict, map, first, 0);
301	if (CHECK_FAIL(err)) {
302		perror("bpf_prog_attach");
303		goto out;
304	}
305
306	err = bpf_prog_attach(verdict, map, second, 0);
307	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");
308
309	err = bpf_prog_detach2(verdict, map, first);
310	if (CHECK_FAIL(err)) {
311		perror("bpf_prog_detach2");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
312		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
313	}
 
 
 
 
 
 
 
 
 
 
 
314out:
315	test_sockmap_skb_verdict_attach__destroy(skel);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
316}
317
/*
 * Top-level entry point for the sockmap_basic selftest: registers each
 * scenario as a named subtest so they can be run and filtered
 * individually by the test_progs harness.
 */
void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	/* Run the verdict-attach exclusivity check in both orders, since
	 * either flavor may be the one attached first.
	 */
	if (test__start_subtest("sockmap skb_verdict attach")) {
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
}