// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/err.h>
#include <netinet/tcp.h>
#include <test_progs.h>
#include "network_helpers.h"
#include "bpf_dctcp.skel.h"
#include "bpf_cubic.skel.h"
#include "bpf_tcp_nogpl.skel.h"
#include "tcp_ca_update.skel.h"
#include "bpf_dctcp_release.skel.h"
#include "tcp_ca_write_sk_pacing.skel.h"
#include "tcp_ca_incompl_cong_ops.skel.h"
#include "tcp_ca_unsupp_cong_op.skel.h"
#include "tcp_ca_kfunc.skel.h"
#include "bpf_cc_cubic.skel.h"

static const unsigned int total_bytes = 10 * 1024 * 1024;
static int expected_stg = 0xeB9F;

struct cb_opts {
	const char *cc;
	int map_fd;
};

static int settcpca(int fd, const char *tcp_ca)
{
	int err;

	err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
	if (!ASSERT_NEQ(err, -1, "setsockopt"))
		return -1;

	return 0;
}

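/* Start an IPv6 TCP server and connect a client to it, applying the given
 * helper opts on each side. On failure, any fd that was opened is closed
 * and reset to -1.
 */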
static bool start_test(char *addr_str,
		       const struct network_helper_opts *srv_opts,
		       const struct network_helper_opts *cli_opts,
		       int *srv_fd, int *cli_fd)
{
	*srv_fd = start_server_str(AF_INET6, SOCK_STREAM, addr_str, 0, srv_opts);
	if (!ASSERT_NEQ(*srv_fd, -1, "start_server_str"))
		goto err;

	/* connect to server */
	*cli_fd = connect_to_fd_opts(*srv_fd, cli_opts);
	if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts"))
		goto err;

	return true;

err:
	if (*srv_fd != -1) {
		close(*srv_fd);
		*srv_fd = -1;
	}
	if (*cli_fd != -1) {
		close(*cli_fd);
		*cli_fd = -1;
	}
	return false;
}

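/* Transfer total_bytes between a freshly created server/client pair that
 * uses the same helper opts on both sides.
 */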
static void do_test(const struct network_helper_opts *opts)
{
	int lfd = -1, fd = -1;

	if (!start_test(NULL, opts, opts, &lfd, &fd))
		goto done;

	ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");

done:
	if (lfd != -1)
		close(lfd);
	if (fd != -1)
		close(fd);
}

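/* post_socket_cb: switch the new socket to the congestion control named in
 * cb_opts->cc.
 */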
static int cc_cb(int fd, void *opts)
{
	struct cb_opts *cb_opts = (struct cb_opts *)opts;

	return settcpca(fd, cb_opts->cc);
}

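/* Attach bpf_cubic, run a transfer over it and check that the program's
 * pkts_acked() callback was invoked.
 */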
static void test_cubic(void)
{
	struct cb_opts cb_opts = {
		.cc = "bpf_cubic",
	};
	struct network_helper_opts opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &cb_opts,
	};
	struct bpf_cubic *cubic_skel;
	struct bpf_link *link;

	cubic_skel = bpf_cubic__open_and_load();
	if (!ASSERT_OK_PTR(cubic_skel, "bpf_cubic__open_and_load"))
		return;

	link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
		bpf_cubic__destroy(cubic_skel);
		return;
	}

	do_test(&opts);

	ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");

	bpf_link__destroy(link);
	bpf_cubic__destroy(cubic_skel);
}

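/* post_socket_cb: set the congestion control and seed the socket storage
 * map with expected_stg for this fd.
 */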
static int stg_post_socket_cb(int fd, void *opts)
{
	struct cb_opts *cb_opts = (struct cb_opts *)opts;
	int err;

	err = settcpca(fd, cb_opts->cc);
	if (err)
		return err;

	err = bpf_map_update_elem(cb_opts->map_fd, &fd,
				  &expected_stg, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
		return err;

	return 0;
}

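/* Attach bpf_dctcp and run a transfer. The storage entry written for the
 * client fd in stg_post_socket_cb is expected to be consumed on the BPF
 * side (reported via stg_result), so the later lookup on the client fd
 * must fail with ENOENT.
 */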
static void test_dctcp(void)
{
	struct cb_opts cb_opts = {
		.cc = "bpf_dctcp",
	};
	struct network_helper_opts opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &cb_opts,
	};
	struct network_helper_opts cli_opts = {
		.post_socket_cb = stg_post_socket_cb,
		.cb_opts = &cb_opts,
	};
	int lfd = -1, fd = -1, tmp_stg, err;
	struct bpf_dctcp *dctcp_skel;
	struct bpf_link *link;

	dctcp_skel = bpf_dctcp__open_and_load();
	if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load"))
		return;

	link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
		bpf_dctcp__destroy(dctcp_skel);
		return;
	}

	cb_opts.map_fd = bpf_map__fd(dctcp_skel->maps.sk_stg_map);
	if (!start_test(NULL, &opts, &cli_opts, &lfd, &fd))
		goto done;

	err = bpf_map_lookup_elem(cb_opts.map_fd, &fd, &tmp_stg);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
	    !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
		goto done;

	ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
	ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result");

done:
	bpf_link__destroy(link);
	bpf_dctcp__destroy(dctcp_skel);
	if (lfd != -1)
		close(lfd);
	if (fd != -1)
		close(fd);
}

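/* Exercise struct_ops auto-attach: only the map left with autoattach
 * enabled should be attached by bpf_dctcp__attach(), and its link must be
 * populated in the skeleton.
 */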
static void test_dctcp_autoattach_map(void)
{
	struct cb_opts cb_opts = {
		.cc = "bpf_dctcp",
	};
	struct network_helper_opts opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &cb_opts,
	};
	struct bpf_dctcp *dctcp_skel;
	struct bpf_link *link;

	dctcp_skel = bpf_dctcp__open_and_load();
	if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load"))
		return;

	bpf_map__set_autoattach(dctcp_skel->maps.dctcp, true);
	bpf_map__set_autoattach(dctcp_skel->maps.dctcp_nouse, false);

	if (!ASSERT_OK(bpf_dctcp__attach(dctcp_skel), "bpf_dctcp__attach"))
		goto destroy;

	/* struct_ops is auto-attached */
	link = dctcp_skel->links.dctcp;
	if (!ASSERT_OK_PTR(link, "link"))
		goto destroy;

	do_test(&opts);

destroy:
	bpf_dctcp__destroy(dctcp_skel);
}

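/* Several negative tests below expect a specific message in the program
 * load log. libbpf_debug_print() is installed as the libbpf print callback
 * and records in 'found' whether err_str appeared in that log.
 */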
static char *err_str;
static bool found;

static int libbpf_debug_print(enum libbpf_print_level level,
			      const char *format, va_list args)
{
	const char *prog_name, *log_buf;

	if (level != LIBBPF_WARN ||
	    !strstr(format, "-- BEGIN PROG LOAD LOG --")) {
		vprintf(format, args);
		return 0;
	}

	prog_name = va_arg(args, char *);
	log_buf = va_arg(args, char *);
	if (!log_buf)
		goto out;
	if (err_str && strstr(log_buf, err_str) != NULL)
		found = true;
out:
	printf(format, prog_name, log_buf);
	return 0;
}

static void test_invalid_license(void)
{
	libbpf_print_fn_t old_print_fn;
	struct bpf_tcp_nogpl *skel;

	err_str = "struct ops programs must have a GPL compatible license";
	found = false;
	old_print_fn = libbpf_set_print(libbpf_debug_print);

	skel = bpf_tcp_nogpl__open_and_load();
	ASSERT_NULL(skel, "bpf_tcp_nogpl");
	ASSERT_EQ(found, true, "expected_err_msg");

	bpf_tcp_nogpl__destroy(skel);
	libbpf_set_print(old_print_fn);
}

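/* Load bpf_dctcp with its fallback congestion control set to "cubic" and
 * verify that the accepted server socket ends up on the fallback, with the
 * BPF-side result counters matching.
 */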
static void test_dctcp_fallback(void)
{
	int err, lfd = -1, cli_fd = -1, srv_fd = -1;
	struct bpf_dctcp *dctcp_skel;
	struct bpf_link *link = NULL;
	struct cb_opts dctcp = {
		.cc = "bpf_dctcp",
	};
	struct network_helper_opts srv_opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &dctcp,
	};
	struct cb_opts cubic = {
		.cc = "cubic",
	};
	struct network_helper_opts cli_opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &cubic,
	};
	char srv_cc[16];
	socklen_t cc_len = sizeof(srv_cc);

	dctcp_skel = bpf_dctcp__open();
	if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
		return;
	strcpy(dctcp_skel->rodata->fallback_cc, "cubic");
	if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load"))
		goto done;

	link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
	if (!ASSERT_OK_PTR(link, "dctcp link"))
		goto done;

	if (!start_test("::1", &srv_opts, &cli_opts, &lfd, &cli_fd))
		goto done;

	srv_fd = accept(lfd, NULL, 0);
	if (!ASSERT_GE(srv_fd, 0, "srv_fd"))
		goto done;
	ASSERT_STREQ(dctcp_skel->bss->cc_res, "cubic", "cc_res");
	ASSERT_EQ(dctcp_skel->bss->tcp_cdg_res, -ENOTSUPP, "tcp_cdg_res");
	/* Every setsockopt(TCP_CONGESTION) issued from the recursively
	 * re-entered bpf_dctcp->init() should fail with -EBUSY.
	 */
	ASSERT_EQ(dctcp_skel->bss->ebusy_cnt, 3, "ebusy_cnt");

	err = getsockopt(srv_fd, SOL_TCP, TCP_CONGESTION, srv_cc, &cc_len);
	if (!ASSERT_OK(err, "getsockopt(srv_fd, TCP_CONGESTION)"))
		goto done;
	ASSERT_STREQ(srv_cc, "cubic", "srv_fd cc");

done:
	bpf_link__destroy(link);
	bpf_dctcp__destroy(dctcp_skel);
	if (lfd != -1)
		close(lfd);
	if (srv_fd != -1)
		close(srv_fd);
	if (cli_fd != -1)
		close(cli_fd);
}

static void test_rel_setsockopt(void)
{
	struct bpf_dctcp_release *rel_skel;
	libbpf_print_fn_t old_print_fn;

	err_str = "program of this type cannot use helper bpf_setsockopt";
	found = false;

	old_print_fn = libbpf_set_print(libbpf_debug_print);
	rel_skel = bpf_dctcp_release__open_and_load();
	libbpf_set_print(old_print_fn);

	ASSERT_ERR_PTR(rel_skel, "rel_skel");
	ASSERT_TRUE(found, "expected_err_msg");

	bpf_dctcp_release__destroy(rel_skel);
}

static void test_write_sk_pacing(void)
{
	struct tcp_ca_write_sk_pacing *skel;
	struct bpf_link *link;

	skel = tcp_ca_write_sk_pacing__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
	ASSERT_OK_PTR(link, "attach_struct_ops");

	bpf_link__destroy(link);
	tcp_ca_write_sk_pacing__destroy(skel);
}

static void test_incompl_cong_ops(void)
{
	struct tcp_ca_incompl_cong_ops *skel;
	struct bpf_link *link;

	skel = tcp_ca_incompl_cong_ops__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	/* That cong_avoid() and cong_control() are missing is only reported
	 * at this point:
	 */
	link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops);
	ASSERT_ERR_PTR(link, "attach_struct_ops");

	bpf_link__destroy(link);
	tcp_ca_incompl_cong_ops__destroy(skel);
}

static void test_unsupp_cong_op(void)
{
	libbpf_print_fn_t old_print_fn;
	struct tcp_ca_unsupp_cong_op *skel;

	err_str = "attach to unsupported member get_info";
	found = false;
	old_print_fn = libbpf_set_print(libbpf_debug_print);

	skel = tcp_ca_unsupp_cong_op__open_and_load();
	ASSERT_NULL(skel, "open_and_load");
	ASSERT_EQ(found, true, "expected_err_msg");

	tcp_ca_unsupp_cong_op__destroy(skel);
	libbpf_set_print(old_print_fn);
}

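/* Switch the struct_ops map backing a link from ca_update_1 to ca_update_2
 * with bpf_link__update_map() and confirm that only the new CA's counter
 * keeps growing afterwards.
 */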
static void test_update_ca(void)
{
	struct cb_opts cb_opts = {
		.cc = "tcp_ca_update",
	};
	struct network_helper_opts opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &cb_opts,
	};
	struct tcp_ca_update *skel;
	struct bpf_link *link;
	int saved_ca1_cnt;
	int err;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
		goto out;

	do_test(&opts);
	saved_ca1_cnt = skel->bss->ca1_cnt;
	ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");

	err = bpf_link__update_map(link, skel->maps.ca_update_2);
	ASSERT_OK(err, "update_map");

	do_test(&opts);
	ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
	ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");

	bpf_link__destroy(link);
out:
	tcp_ca_update__destroy(skel);
}

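/* Updating a link with an incompatible struct_ops map (ca_wrong) must fail,
 * and the original CA should keep counting afterwards.
 */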
static void test_update_wrong(void)
{
	struct cb_opts cb_opts = {
		.cc = "tcp_ca_update",
	};
	struct network_helper_opts opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &cb_opts,
	};
	struct tcp_ca_update *skel;
	struct bpf_link *link;
	int saved_ca1_cnt;
	int err;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
		goto out;

	do_test(&opts);
	saved_ca1_cnt = skel->bss->ca1_cnt;
	ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");

	err = bpf_link__update_map(link, skel->maps.ca_wrong);
	ASSERT_ERR(err, "update_map");

	do_test(&opts);
	ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");

	bpf_link__destroy(link);
out:
	tcp_ca_update__destroy(skel);
}

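/* Mix a link-based attachment (ca_update_1) with the non-link struct_ops
 * map ca_no_link; updating the link to point at the non-link map is
 * expected to fail.
 */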
static void test_mixed_links(void)
{
	struct cb_opts cb_opts = {
		.cc = "tcp_ca_update",
	};
	struct network_helper_opts opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &cb_opts,
	};
	struct tcp_ca_update *skel;
	struct bpf_link *link, *link_nl;
	int err;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
	if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"))
		goto out;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops");

	do_test(&opts);
	ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt");

	err = bpf_link__update_map(link, skel->maps.ca_no_link);
	ASSERT_ERR(err, "update_map");

	bpf_link__destroy(link);
	bpf_link__destroy(link_nl);
out:
	tcp_ca_update__destroy(skel);
}

static void test_multi_links(void)
{
	struct tcp_ca_update *skel;
	struct bpf_link *link;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops_1st");
	bpf_link__destroy(link);

	/* The same map must be usable to create links multiple times. */
	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
	bpf_link__destroy(link);

	tcp_ca_update__destroy(skel);
}

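/* bpf_link_update() with BPF_F_REPLACE must honour old_map_fd: the update
 * is rejected when old_map_fd does not match the map currently backing the
 * link, and accepted when it does.
 */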
static void test_link_replace(void)
{
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
	struct tcp_ca_update *skel;
	struct bpf_link *link;
	int err;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops_1st");
	bpf_link__destroy(link);

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
	if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd"))
		goto out;

	/* BPF_F_REPLACE with a wrong old map fd: this should fail.
	 *
	 * With BPF_F_REPLACE, the link is updated only if the old map fd
	 * given here matches the map backing the link.
	 */
	opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_1);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link),
			      bpf_map__fd(skel->maps.ca_update_1),
			      &opts);
	ASSERT_ERR(err, "bpf_link_update_fail");

	/* BPF_F_REPLACE with the correct old map fd: this should succeed. */
	opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_2);
	err = bpf_link_update(bpf_link__fd(link),
			      bpf_map__fd(skel->maps.ca_update_1),
			      &opts);
	ASSERT_OK(err, "bpf_link_update_success");

	bpf_link__destroy(link);

out:
	tcp_ca_update__destroy(skel);
}

static void test_tcp_ca_kfunc(void)
{
	struct tcp_ca_kfunc *skel;

	skel = tcp_ca_kfunc__open_and_load();
	ASSERT_OK_PTR(skel, "tcp_ca_kfunc__open_and_load");
	tcp_ca_kfunc__destroy(skel);
}

static void test_cc_cubic(void)
{
	struct cb_opts cb_opts = {
		.cc = "bpf_cc_cubic",
	};
	struct network_helper_opts opts = {
		.post_socket_cb = cc_cb,
		.cb_opts = &cb_opts,
	};
	struct bpf_cc_cubic *cc_cubic_skel;
	struct bpf_link *link;

	cc_cubic_skel = bpf_cc_cubic__open_and_load();
	if (!ASSERT_OK_PTR(cc_cubic_skel, "bpf_cc_cubic__open_and_load"))
		return;

	link = bpf_map__attach_struct_ops(cc_cubic_skel->maps.cc_cubic);
	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
		bpf_cc_cubic__destroy(cc_cubic_skel);
		return;
	}

	do_test(&opts);

	bpf_link__destroy(link);
	bpf_cc_cubic__destroy(cc_cubic_skel);
}

void test_bpf_tcp_ca(void)
{
	if (test__start_subtest("dctcp"))
		test_dctcp();
	if (test__start_subtest("cubic"))
		test_cubic();
	if (test__start_subtest("invalid_license"))
		test_invalid_license();
	if (test__start_subtest("dctcp_fallback"))
		test_dctcp_fallback();
	if (test__start_subtest("rel_setsockopt"))
		test_rel_setsockopt();
	if (test__start_subtest("write_sk_pacing"))
		test_write_sk_pacing();
	if (test__start_subtest("incompl_cong_ops"))
		test_incompl_cong_ops();
	if (test__start_subtest("unsupp_cong_op"))
		test_unsupp_cong_op();
	if (test__start_subtest("update_ca"))
		test_update_ca();
	if (test__start_subtest("update_wrong"))
		test_update_wrong();
	if (test__start_subtest("mixed_links"))
		test_mixed_links();
	if (test__start_subtest("multi_links"))
		test_multi_links();
	if (test__start_subtest("link_replace"))
		test_link_replace();
	if (test__start_subtest("tcp_ca_kfunc"))
		test_tcp_ca_kfunc();
	if (test__start_subtest("cc_cubic"))
		test_cc_cubic();
	if (test__start_subtest("dctcp_autoattach_map"))
		test_dctcp_autoattach_map();
}