// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <error.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>

#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#include "test_sockmap_skb_verdict_attach.skel.h"
#include "test_sockmap_progs_query.skel.h"
#include "test_sockmap_pass_prog.skel.h"
#include "test_sockmap_drop_prog.skel.h"
#include "test_sockmap_change_tail.skel.h"
#include "bpf_iter_sockmap.skel.h"

#include "sockmap_helpers.h"

#define TCP_REPAIR		19	/* TCP sock is under repair right now */

#define TCP_REPAIR_ON		1
#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */

static int connected_socket_v4(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(80),
		.sin_addr = { inet_addr("127.0.0.1") },
	};
	socklen_t len = sizeof(addr);
	int s, repair, err;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (!ASSERT_GE(s, 0, "socket"))
		goto error;

	repair = TCP_REPAIR_ON;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	err = connect(s, (struct sockaddr *)&addr, len);
	if (!ASSERT_OK(err, "connect"))
		goto error;

	repair = TCP_REPAIR_OFF_NO_WP;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	return s;
error:
	perror(__func__);
	close(s);
	return -1;
}

static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
	__u32 i, max_entries = bpf_map__max_entries(src);
	int err, src_fd, dst_fd;

	src_fd = bpf_map__fd(src);
	dst_fd = bpf_map__fd(dst);

	for (i = 0; i < max_entries; i++) {
		__u64 src_cookie, dst_cookie;

		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
		if (err && errno == ENOENT) {
			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
			ASSERT_ERR(err, "map_lookup_elem(dst)");
			ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
			continue;
		}
		if (!ASSERT_OK(err, "lookup_elem(src)"))
			continue;

		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
		if (!ASSERT_OK(err, "lookup_elem(dst)"))
			continue;

		ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
	}
}

/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
	const int zero = 0;
	int s, map, err;

	s = connected_socket_v4();
	if (!ASSERT_GE(s, 0, "connected_socket_v4"))
		return;

	map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
	if (!ASSERT_GE(map, 0, "bpf_map_create"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update"))
		goto out;

out:
	close(map);
	close(s);
}

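/* Closing a vsock socket should remove it from the sockmap, so the same
 * key can be reused for another socket afterwards.
 */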
static void test_sockmap_vsock_delete_on_close(void)
{
	int err, c, p, map;
	const int zero = 0;

	err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p);
	if (!ASSERT_OK(err, "create_pair(AF_VSOCK)"))
		return;

	map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int),
			     sizeof(int), 1, NULL);
	if (!ASSERT_GE(map, 0, "bpf_map_create")) {
		close(c);
		goto out;
	}

	err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST);
	close(c);
	if (!ASSERT_OK(err, "bpf_map_update"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
	ASSERT_OK(err, "after close(), bpf_map_update");

out:
	close(p);
	close(map);
}

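/* Attach and then detach an sk_msg verdict program on the map. */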
static void test_skmsg_helpers(enum bpf_map_type map_type)
{
	struct test_skmsg_load_helpers *skel;
	int err, map, verdict;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_skmsg_load_helpers__destroy(skel);
}

static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
{
	struct bpf_program *prog, *prog_clone, *prog_clone2;
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
	struct test_skmsg_load_helpers *skel;
	struct bpf_link *link, *link2;
	int err, map;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	prog = skel->progs.prog_msg_verdict;
	prog_clone = skel->progs.prog_msg_verdict_clone;
	prog_clone2 = skel->progs.prog_msg_verdict_clone2;
	map = bpf_map__fd(skel->maps.sock_map);

	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	/* Fail since bpf_link for the same prog has been created. */
	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_ERR(err, "bpf_prog_attach"))
		goto out;

	/* Fail since bpf_link for the same prog type has been created. */
	link2 = bpf_program__attach_sockmap(prog_clone, map);
	if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) {
		bpf_link__detach(link2);
		goto out;
	}

	err = bpf_link__update_program(link, prog_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with different type attempts to do update. */
	err = bpf_link__update_program(link, skel->progs.prog_skb_verdict);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	/* Fail since the old prog does not match the one in the kernel. */
	opts.old_prog_fd = bpf_program__fd(prog_clone2);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_ERR(err, "bpf_link_update"))
		goto out;

	opts.old_prog_fd = bpf_program__fd(prog_clone);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_OK(err, "bpf_link_update"))
		goto out;
out:
	bpf_link__detach(link);
	test_skmsg_load_helpers__destroy(skel);
}

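/* Run the copy_sock_map program via bpf_prog_test_run to copy the socket
 * from the src map into the destination map or hash, then compare cookies.
 */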
static void test_sockmap_update(enum bpf_map_type map_type)
{
	int err, prog, src;
	struct test_sockmap_update *skel;
	struct bpf_map *dst_map;
	const __u32 zero = 0;
	char dummy[14] = {0};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = dummy,
		.data_size_in = sizeof(dummy),
		.repeat = 1,
	);
	__s64 sk;

	sk = connected_socket_v4();
	if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
		return;

	skel = test_sockmap_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto close_sk;

	prog = bpf_program__fd(skel->progs.copy_sock_map);
	src = bpf_map__fd(skel->maps.src);
	if (map_type == BPF_MAP_TYPE_SOCKMAP)
		dst_map = skel->maps.dst_sock_map;
	else
		dst_map = skel->maps.dst_sock_hash;

	err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
	if (!ASSERT_OK(err, "update_elem(src)"))
		goto out;

	err = bpf_prog_test_run_opts(prog, &topts);
	if (!ASSERT_OK(err, "test_run"))
		goto out;
	if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
		goto out;

	compare_cookies(skel->maps.src, dst_map);

out:
	test_sockmap_update__destroy(skel);
close_sk:
	close(sk);
}

static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;

	skel = test_sockmap_invalid_update__open_and_load();
	if (!ASSERT_NULL(skel, "open_and_load"))
		test_sockmap_invalid_update__destroy(skel);
}

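/* Iterate over the source map with a sockmap iterator program that copies
 * each socket into the dst map, then compare the socket cookies.
 */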
static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
		goto out;

	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* drain the iterator so the copy program visits every element */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* check results */
	if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	if (sock_fd)
		free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}

static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
					    enum bpf_attach_type second)
{
	struct test_sockmap_skb_verdict_attach *skel;
	int err, map, verdict;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, first, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_attach(verdict, map, second, 0);
	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");

	err = bpf_prog_detach2(verdict, map, first);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

static void test_sockmap_skb_verdict_attach_with_link(void)
{
	struct test_sockmap_skb_verdict_attach *skel;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;
	prog = skel->progs.prog_skb_verdict;
	map = bpf_map__fd(skel->maps.sock_map);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	bpf_link__detach(link);

	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	/* Fail since attaching with the same prog/map has been done. */
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap"))
		bpf_link__detach(link);

	err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

static __u32 query_prog_id(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
	    !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
		return 0;

	return info.id;
}

static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
{
	struct test_sockmap_progs_query *skel;
	int err, map_fd, verdict_fd;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;

	skel = test_sockmap_progs_query__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sock_map);

	if (attach_type == BPF_SK_MSG_VERDICT)
		verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
	else
		verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);

	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 0, "wrong program count on query");

	err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach failed"))
		goto out;

	prog_cnt = 1;
	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
	ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
		  "wrong prog_ids on query");

	bpf_prog_detach2(verdict_fd, map_fd, attach_type);
out:
	test_sockmap_progs_query__destroy(skel);
}

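/* A FIN delivered through the skb verdict path should still wake up epoll
 * on the mapped socket and make recv() return 0.
 */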
#define MAX_EVENTS 10
static void test_sockmap_skb_verdict_shutdown(void)
{
	int n, err, map, verdict, c1 = -1, p1 = -1;
	struct epoll_event ev, events[MAX_EVENTS];
	struct test_sockmap_pass_prog *skel;
	int zero = 0;
	int epollfd;
	char b;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
	if (err < 0)
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (err < 0)
		goto out_close;

	shutdown(p1, SHUT_WR);

	ev.events = EPOLLIN;
	ev.data.fd = c1;

	epollfd = epoll_create1(0);
	if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
		goto out_close;
	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
	if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
		goto out_close;
	err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
	if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
		goto out_close;

	n = recv(c1, &b, 1, SOCK_NONBLOCK);
	ASSERT_EQ(n, 0, "recv_timeout(fin)");
out_close:
	close(c1);
	close(p1);
out:
	test_sockmap_pass_prog__destroy(skel);
}

static void test_sockmap_stream_pass(void)
{
	int zero = 0, sent, recvd;
	int verdict, parser;
	int err, map;
	int c = -1, p = -1;
	struct test_sockmap_pass_prog *pass = NULL;
	char snd[256] = "0123456789";
	char rcv[256] = "0";

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
	parser = bpf_program__fd(pass->progs.prog_skb_parser);
	map = bpf_map__fd(pass->maps.sock_map_rx);

	err = bpf_prog_attach(parser, map, BPF_SK_SKB_STREAM_PARSER, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach stream parser"))
		goto out;

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach stream verdict"))
		goto out;

	err = create_pair(AF_INET, SOCK_STREAM, &c, &p);
	if (err)
		goto out;

	/* sk_data_ready of 'p' will be replaced by the strparser handler */
	err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(p)"))
		goto out_close;

	/*
	 * Since 'prog_skb_parser' returns the original skb len and
	 * 'prog_skb_verdict' returns SK_PASS, the kernel will just
	 * pass the data through to the original socket 'p'.
	 */
	sent = xsend(c, snd, sizeof(snd), 0);
	ASSERT_EQ(sent, sizeof(snd), "xsend(c)");

	recvd = recv_timeout(p, rcv, sizeof(rcv), SOCK_NONBLOCK,
			     IO_TIMEOUT_SEC);
	ASSERT_EQ(recvd, sizeof(rcv), "recv_timeout(p)");

out_close:
	close(c);
	close(p);

out:
	test_sockmap_pass_prog__destroy(pass);
}

static void test_sockmap_skb_verdict_fionread(bool pass_prog)
{
	int err, map, verdict, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
	int expected, zero = 0, sent, recvd, avail;
	struct test_sockmap_pass_prog *pass = NULL;
	struct test_sockmap_drop_prog *drop = NULL;
	char buf[256] = "0123456789";

	if (pass_prog) {
		pass = test_sockmap_pass_prog__open_and_load();
		if (!ASSERT_OK_PTR(pass, "open_and_load"))
			return;
		verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
		map = bpf_map__fd(pass->maps.sock_map_rx);
		expected = sizeof(buf);
	} else {
		drop = test_sockmap_drop_prog__open_and_load();
		if (!ASSERT_OK_PTR(drop, "open_and_load"))
			return;
		verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
		map = bpf_map__fd(drop->maps.sock_map_rx);
		/* On drop data is consumed immediately and copied_seq inc'd */
		expected = 0;
	}

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = create_socket_pairs(AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
	if (!ASSERT_OK(err, "create_socket_pairs()"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, &buf, sizeof(buf), 0);
	ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
	/* On DROP test there will be no data to read */
	if (pass_prog) {
		recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
		ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
	}

out_close:
	close(c0);
	close(p0);
	close(c1);
	close(p1);
out:
	if (pass_prog)
		test_sockmap_pass_prog__destroy(pass);
	else
		test_sockmap_drop_prog__destroy(drop);
}

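/* The attached verdict program trims or grows each skb (change_tail);
 * verify the lengths seen by the receiver and the return codes recorded
 * in change_tail_ret.
 */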
static void test_sockmap_skb_verdict_change_tail(void)
{
	struct test_sockmap_change_tail *skel;
	int err, map, verdict;
	int c1, p1, sent, recvd;
	int zero = 0;
	char buf[2];

	skel = test_sockmap_change_tail__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;
	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;
	err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
	if (!ASSERT_OK(err, "create_pair()"))
		goto out;
	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;
	sent = xsend(p1, "Tr", 2, 0);
	ASSERT_EQ(sent, 2, "xsend(p1)");
	recvd = recv(c1, buf, 2, 0);
	ASSERT_EQ(recvd, 1, "recv(c1)");
	ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");

	sent = xsend(p1, "G", 1, 0);
	ASSERT_EQ(sent, 1, "xsend(p1)");
	recvd = recv(c1, buf, 2, 0);
	ASSERT_EQ(recvd, 2, "recv(c1)");
	ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");

	sent = xsend(p1, "E", 1, 0);
	ASSERT_EQ(sent, 1, "xsend(p1)");
	recvd = recv(c1, buf, 1, 0);
	ASSERT_EQ(recvd, 1, "recv(c1)");
	ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret");

out_close:
	close(c1);
	close(p1);
out:
	test_sockmap_change_tail__destroy(skel);
}

static void test_sockmap_skb_verdict_peek_helper(int map)
{
	int err, c1, p1, zero = 0, sent, recvd, avail;
	char snd[256] = "0123456789";
	char rcv[256] = "0";

	err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
	if (!ASSERT_OK(err, "create_pair()"))
		return;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, snd, sizeof(snd), 0);
	ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
	recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
	recvd = recv(c1, rcv, sizeof(rcv), 0);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");

out_close:
	close(c1);
	close(p1);
}

static void test_sockmap_skb_verdict_peek(void)
{
	struct test_sockmap_pass_prog *pass;
	int err, map, verdict;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
	map = bpf_map__fd(pass->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);

out:
	test_sockmap_pass_prog__destroy(pass);
}

static void test_sockmap_skb_verdict_peek_with_link(void)
{
	struct test_sockmap_pass_prog *pass;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	prog = pass->progs.prog_skb_verdict;
	map = bpf_map__fd(pass->maps.sock_map_rx);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	err = bpf_link__update_program(link, pass->progs.prog_skb_verdict_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with different attach type attempts to do update. */
	err = bpf_link__update_program(link, pass->progs.prog_skb_parser);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);
	ASSERT_EQ(pass->bss->clone_called, 1, "clone_called");
out:
	bpf_link__detach(link);
	test_sockmap_pass_prog__destroy(pass);
}

static void test_sockmap_unconnected_unix(void)
{
	int err, map, stream = 0, dgram = 0, zero = 0;
	struct test_sockmap_pass_prog *skel;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
	if (stream < 0)
		return;

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		close(stream);
		return;
	}

	err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
	ASSERT_ERR(err, "bpf_map_update_elem(stream)");

	err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
	ASSERT_OK(err, "bpf_map_update_elem(dgram)");

	close(stream);
	close(dgram);
}

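/* Add the same sockets to many slots of a single map, then delete every
 * entry again.
 */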
static void test_sockmap_many_socket(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map, &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

static void test_sockmap_many_maps(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map[2], entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map[0] = bpf_map__fd(skel->maps.sock_map_rx);
	map[1] = bpf_map__fd(skel->maps.sock_map_tx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map[1], &entry);
		entry--;
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
		err = bpf_map_delete_elem(map[0], &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

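/* Update the same map slot with sockets of different types so each update
 * replaces the previous one; after the TCP socket is closed, deleting the
 * slot is expected to fail because the entry was already removed.
 */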
static void test_sockmap_same_sock(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, zero = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err) {
		close(tcp);
		goto out;
	}

	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}

	close(tcp);
	err = bpf_map_delete_elem(map, &zero);
	ASSERT_ERR(err, "bpf_map_delete_elem(entry)");

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

static void test_sockmap_skb_verdict_vsock_poll(void)
{
	struct test_sockmap_pass_prog *skel;
	int err, map, conn, peer;
	struct bpf_program *prog;
	struct bpf_link *link;
	char buf = 'x';
	int zero = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	if (create_pair(AF_VSOCK, SOCK_STREAM, &conn, &peer))
		goto destroy;

	prog = skel->progs.prog_skb_verdict;
	map = bpf_map__fd(skel->maps.sock_map_rx);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto close;

	err = bpf_map_update_elem(map, &zero, &conn, BPF_ANY);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto detach;

	if (xsend(peer, &buf, 1, 0) != 1)
		goto detach;

	err = poll_read(conn, IO_TIMEOUT_SEC);
	if (!ASSERT_OK(err, "poll"))
		goto detach;

	if (xrecv_nonblock(conn, &buf, 1, 0) != 1)
		FAIL("xrecv_nonblock");
detach:
	bpf_link__detach(link);
close:
	xclose(conn);
	xclose(peer);
destroy:
	test_sockmap_pass_prog__destroy(skel);
}

void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap vsock delete on close"))
		test_sockmap_vsock_delete_on_close();
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict attach")) {
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
	if (test__start_subtest("sockmap skb_verdict attach_with_link"))
		test_sockmap_skb_verdict_attach_with_link();
	if (test__start_subtest("sockmap msg_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
	if (test__start_subtest("sockmap stream_parser progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
	if (test__start_subtest("sockmap stream_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
	if (test__start_subtest("sockmap skb_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
	if (test__start_subtest("sockmap skb_verdict shutdown"))
		test_sockmap_skb_verdict_shutdown();
	if (test__start_subtest("sockmap stream parser and verdict pass"))
		test_sockmap_stream_pass();
	if (test__start_subtest("sockmap skb_verdict fionread"))
		test_sockmap_skb_verdict_fionread(true);
	if (test__start_subtest("sockmap skb_verdict fionread on drop"))
		test_sockmap_skb_verdict_fionread(false);
	if (test__start_subtest("sockmap skb_verdict change tail"))
		test_sockmap_skb_verdict_change_tail();
	if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
		test_sockmap_skb_verdict_peek();
	if (test__start_subtest("sockmap skb_verdict msg_f_peek with link"))
		test_sockmap_skb_verdict_peek_with_link();
	if (test__start_subtest("sockmap unconnected af_unix"))
		test_sockmap_unconnected_unix();
	if (test__start_subtest("sockmap one socket to many map entries"))
		test_sockmap_many_socket();
	if (test__start_subtest("sockmap one socket to many maps"))
		test_sockmap_many_maps();
	if (test__start_subtest("sockmap same socket replace"))
		test_sockmap_same_sock();
	if (test__start_subtest("sockmap sk_msg attach sockmap helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg attach sockhash helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict vsock poll"))
		test_sockmap_skb_verdict_vsock_poll();
}