   1// SPDX-License-Identifier: GPL-2.0
   2#include <unistd.h>
   3#include <test_progs.h>
   4#include <network_helpers.h>
   5#include "tailcall_poke.skel.h"
   6
   7
/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	/* "entry" is the program that issues the tail calls under test. */
	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Fill every jmp_table slot i with classifier_i: nop->jmp patching. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Each run is expected to land in classifier_i (retval == i); deleting
	 * the slot after each run exercises the jmp->nop rewrite.
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Table is empty now: the tail call misses and entry returns 3. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* Refill the table in ascending order (nop->jmp once more). */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Slot 0 holds classifier_0 again, so retval must be 0. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* Overwrite the occupied slots with the programs in reverse order:
	 * slot i gets classifier_j, exercising jmp->jmp rewrites.
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Now slot i dispatches to classifier_j, so retval is the mirrored j;
	 * delete as we go to drain the table again.
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Empty again: fall-through path, retval 3. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* Deleting already-removed keys must fail with ENOENT and must not
	 * patch anything (nop->nop): entry keeps returning 3.
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}
 143
/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	/* "entry" is the root program that chains through the jmp_table. */
	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Install classifier_i into slot i for every jmp_table entry. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Full chain: expected to terminate with retval 2. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	/* Remove slot 2: the chain is cut short and ends with retval 1. */
	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* Remove slot 0 as well: first tail call misses, entry returns 3. */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}
 223
/* Common helper for the tail call limit tests: loads @which, installs
 * classifier_0 into slot 0 of jmp_table, optionally attaches an fentry
 * and/or fexit tracer to subprog_tail, runs the entry program and then
 * checks that the global counter in tailcall.bss reached 33 (presumably
 * the kernel's MAX_TAIL_CALL_CNT — confirm against the running kernel)
 * and that the tracer counters, when enabled, saw the same number of
 * subprog_tail invocations.
 */
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* classifier_0 tail-calls itself via slot 0, so the kernel's tail
	 * call counter is what eventually stops the loop.
	 */
	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	/* Optionally trace subprog_tail of classifier_0 with an fentry
	 * program loaded from a separate object.
	 */
	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	/* Same, but with an fexit tracer on subprog_tail. */
	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* The BPF side bumps a counter in tailcall.bss on every loop
	 * iteration; read it back through the internal map.
	 */
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	/* The fentry tracer must have fired once per subprog_tail call. */
	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	/* Likewise for the fexit tracer. */
	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	/* With slot 0 removed the tail call misses and entry returns 0. */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}
 387
/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	/* No fentry/fexit tracing. */
	test_tailcall_count("tailcall3.bpf.o", false, false);
}
 395
/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	/* No fentry/fexit tracing. */
	test_tailcall_count("tailcall6.bpf.o", false, false);
}
 403
/* test_tailcall_4 checks that the kernel properly selects indirect jump
 * for the case where the key is not known. Latter is passed via global
 * data to select different targets we can compare return value of.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* tailcall.bss holds the runtime-selected key the entry program
	 * feeds into bpf_tail_call(); we drive it from user space below.
	 */
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	/* Install classifier_i into slot i. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Select key i via global data and expect classifier_i's retval. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	/* Same selection, but with the target slot deleted: each run must
	 * fall through with retval 3.
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}
 494
/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates
 * an indirect jump when the keys are const but different from different branches.
 */
static void test_tailcall_5(void)
{
	/* key[] holds the arbitrary constants the BPF side maps back to
	 * jmp_table slots 0..2 from different branches.
	 */
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* tailcall.bss carries the branch selector written from user space. */
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	/* Install classifier_i into slot i. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Select branch i via key[i] and expect classifier_i's retval. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	/* Repeat with the target slot deleted: fall-through retval 3. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}
 584
/* test_tailcall_bpf2bpf_1 purpose is to make sure that tailcalls are working
 * correctly in correlation with BPF subprograms
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* With the full table, the chain ends with retval 1. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	/* Expected retval is twice the input packet length, proving the
	 * subprog read ctx and the entry program returned its result.
	 */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}
 667
/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* classifier_0 in slot 0 keeps tail-calling itself (via a bpf2bpf
	 * subprog) until the kernel's tail call counter stops it.
	 */
	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* Read back the loop counter from the program's .bss section;
	 * 33 presumably matches MAX_TAIL_CALL_CNT — confirm on the
	 * running kernel.
	 */
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	/* With slot 0 removed the tail call misses: retval 0. */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}
 747
/* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have the tailcalls
 * in them
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Install classifier_i into slot i. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Retval is a multiple of the packet size, so the three runs below
	 * distinguish which tail calls actually executed.
	 */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	/* Without slot 1, only one packet-sized contribution remains. */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	/* Without slot 0 either, the fall-through path yields 2x. */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}
 826
 827#include "tailcall_bpf2bpf4.skel.h"
 828
/* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
 * across tailcalls combined with bpf2bpf calls. for making sure that tailcall
 * counter behaves correctly, bpf program will go through following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through first two tailcalls and start counting from the subprog2 where
 * the loop begins. At the end of the test make sure that the global counter is
 * equal to 31, because tailcall counter includes the first two tailcalls
 * whereas global counter is incremented only on loop presented on flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force verifier to patch instructions. This allows us to ensure jump
 * logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Install classifier_i into slot i to build the flow above. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	/* Seed the BPF-side globals: noise toggle and zeroed counter. */
	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	/* 31 = tail call limit minus the two leading tailcalls that are
	 * not counted by the loop (see header comment).
	 */
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}
 924
 925#include "tailcall_bpf2bpf6.skel.h"
 926
 927/* Tail call counting works even when there is data on stack which is
 928 * not aligned to 8 bytes.
 929 */
 930static void test_tailcall_bpf2bpf_6(void)
 931{
 932	struct tailcall_bpf2bpf6 *obj;
 933	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
 934	LIBBPF_OPTS(bpf_test_run_opts, topts,
 935		.data_in = &pkt_v4,
 936		.data_size_in = sizeof(pkt_v4),
 937		.repeat = 1,
 938	);
 939
 940	obj = tailcall_bpf2bpf6__open_and_load();
 941	if (!ASSERT_OK_PTR(obj, "open and load"))
 942		return;
 943
 944	main_fd = bpf_program__fd(obj->progs.entry);
 945	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
 946		goto out;
 947
 948	map_fd = bpf_map__fd(obj->maps.jmp_table);
 949	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
 950		goto out;
 951
 952	prog_fd = bpf_program__fd(obj->progs.classifier_0);
 953	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
 954		goto out;
 955
 956	i = 0;
 957	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
 958	if (!ASSERT_OK(err, "jmp_table map update"))
 959		goto out;
 960
 961	err = bpf_prog_test_run_opts(main_fd, &topts);
 962	ASSERT_OK(err, "entry prog test run");
 963	ASSERT_EQ(topts.retval, 0, "tailcall retval");
 964
 965	data_fd = bpf_map__fd(obj->maps.bss);
 966	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
 967		goto out;
 968
 969	i = 0;
 970	err = bpf_map_lookup_elem(data_fd, &i, &val);
 971	ASSERT_OK(err, "bss map lookup");
 972	ASSERT_EQ(val, 1, "done flag is set");
 973
 974out:
 975	tailcall_bpf2bpf6__destroy(obj);
 976}
 977
/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	/* fentry tracing only. */
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}
 986
/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	/* fexit tracing only. */
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}
 995
/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	/* Both fentry and fexit tracing enabled. */
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}
1004
/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	/* classifier_0 tail-calls itself via slot 0. */
	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	/* Attach an fentry tracer to classifier_0 itself — the bpf2bpf
	 * *caller* — rather than to subprog_tail as the other variants do.
	 */
	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	/* Run classifier_0 directly (prog_fd, not an "entry" program). */
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	/* 34 here vs 33 in test_tailcall_count — presumably the direct
	 * invocation of classifier_0 adds one pass through the counting
	 * code; NOTE(review): confirm against the BPF selftest the kernel
	 * ships.
	 */
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	/* The fentry program fires only on the traced entry, not on the
	 * tail-called re-entries, hence exactly 1.
	 */
	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}
1110
#define JMP_TABLE "/sys/fs/bpf/jmp_table"

/* Stop flag shared between the main thread and the poke_update thread.
 * _Atomic so the concurrent plain read/write pair is not a data race
 * (with a non-atomic int whose address never escapes, the reader's loop
 * load could legally be hoisted, never observing the store).
 */
static _Atomic int poke_thread_exit;
1114
1115static void *poke_update(void *arg)
1116{
1117	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
1118	struct tailcall_poke *call = arg;
1119
1120	map_fd = bpf_map__fd(call->maps.jmp_table);
1121	prog1_fd = bpf_program__fd(call->progs.call1);
1122	prog2_fd = bpf_program__fd(call->progs.call2);
1123
1124	while (!poke_thread_exit) {
1125		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
1126		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
1127	}
1128
1129	return NULL;
1130}
1131
1132/*
1133 * We are trying to hit prog array update during another program load
1134 * that shares the same prog array map.
1135 *
1136 * For that we share the jmp_table map between two skeleton instances
1137 * by pinning the jmp_table to same path. Then first skeleton instance
1138 * periodically updates jmp_table in 'poke update' thread while we load
1139 * the second skeleton instance in the main thread.
1140 */
1141static void test_tailcall_poke(void)
1142{
1143	struct tailcall_poke *call, *test;
1144	int err, cnt = 10;
1145	pthread_t thread;
1146
1147	unlink(JMP_TABLE);
1148
1149	call = tailcall_poke__open_and_load();
1150	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
1151		return;
1152
1153	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
1154	if (!ASSERT_OK(err, "bpf_map__pin"))
1155		goto out;
1156
1157	err = pthread_create(&thread, NULL, poke_update, call);
1158	if (!ASSERT_OK(err, "new toggler"))
1159		goto out;
1160
1161	while (cnt--) {
1162		test = tailcall_poke__open();
1163		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
1164			break;
1165
1166		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
1167		if (!ASSERT_OK(err, "bpf_map__pin")) {
1168			tailcall_poke__destroy(test);
1169			break;
1170		}
1171
1172		bpf_program__set_autoload(test->progs.test, true);
1173		bpf_program__set_autoload(test->progs.call1, false);
1174		bpf_program__set_autoload(test->progs.call2, false);
1175
1176		err = tailcall_poke__load(test);
1177		tailcall_poke__destroy(test);
1178		if (!ASSERT_OK(err, "tailcall_poke__load"))
1179			break;
1180	}
1181
1182	poke_thread_exit = 1;
1183	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
1184
1185out:
1186	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
1187	tailcall_poke__destroy(call);
1188}
1189
/* Top-level entry: dispatch each tailcall subtest; test__start_subtest()
 * lets the runner select/skip individual subtests by name.
 */
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	/* _5 is _4 with noise enabled (extra verifier instruction patching) */
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
}
v6.2
  1// SPDX-License-Identifier: GPL-2.0
 
  2#include <test_progs.h>
  3#include <network_helpers.h>
 
 
  4
/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Fill slot i with classifier_i (nop->jmp rewrites). */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Run then delete each slot in turn (jmp->nop rewrites). */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* All slots empty: entry prog falls through and returns 3. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* Re-fill slots in reverse program order (jmp->jmp rewrites). */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* Deleting already-empty slots must fail with ENOENT (nop->nop),
	 * and runs keep returning the fall-through value.
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}
140
/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Install classifier_i into slot i of the jmp_table. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Full chain present: expect retval 2. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	/* Remove the last hop: chain now ends earlier, retval 1. */
	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* Remove the first hop: entry prog falls through, retval 3. */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}
220
221static void test_tailcall_count(const char *which)
 
222{
 
 
223	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
224	struct bpf_map *prog_array, *data_map;
225	struct bpf_program *prog;
226	struct bpf_object *obj;
227	char buff[128] = {};
228	LIBBPF_OPTS(bpf_test_run_opts, topts,
229		.data_in = buff,
230		.data_size_in = sizeof(buff),
231		.repeat = 1,
232	);
233
234	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
235			    &prog_fd);
236	if (CHECK_FAIL(err))
237		return;
238
239	prog = bpf_object__find_program_by_name(obj, "entry");
240	if (CHECK_FAIL(!prog))
241		goto out;
242
243	main_fd = bpf_program__fd(prog);
244	if (CHECK_FAIL(main_fd < 0))
245		goto out;
246
247	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
248	if (CHECK_FAIL(!prog_array))
249		goto out;
250
251	map_fd = bpf_map__fd(prog_array);
252	if (CHECK_FAIL(map_fd < 0))
253		goto out;
254
255	prog = bpf_object__find_program_by_name(obj, "classifier_0");
256	if (CHECK_FAIL(!prog))
257		goto out;
258
259	prog_fd = bpf_program__fd(prog);
260	if (CHECK_FAIL(prog_fd < 0))
261		goto out;
262
263	i = 0;
264	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
265	if (CHECK_FAIL(err))
266		goto out;
267
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
268	err = bpf_prog_test_run_opts(main_fd, &topts);
269	ASSERT_OK(err, "tailcall");
270	ASSERT_EQ(topts.retval, 1, "tailcall retval");
271
272	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
273	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
274		return;
275
276	data_fd = bpf_map__fd(data_map);
277	if (CHECK_FAIL(map_fd < 0))
278		return;
279
280	i = 0;
281	err = bpf_map_lookup_elem(data_fd, &i, &val);
282	ASSERT_OK(err, "tailcall count");
283	ASSERT_EQ(val, 33, "tailcall count");
284
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
285	i = 0;
286	err = bpf_map_delete_elem(map_fd, &i);
287	if (CHECK_FAIL(err))
288		goto out;
289
290	err = bpf_prog_test_run_opts(main_fd, &topts);
291	ASSERT_OK(err, "tailcall");
292	ASSERT_OK(topts.retval, "tailcall retval");
293out:
 
 
 
 
294	bpf_object__close(obj);
295}
296
/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	/* tailcall3: constant map index, so the JIT emits a direct jump. */
	test_tailcall_count("tailcall3.bpf.o");
}
304
/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	/* tailcall6: same check as _3 but via an indirect-jump tail call. */
	test_tailcall_count("tailcall6.bpf.o");
}
312
313/* test_tailcall_4 checks that the kernel properly selects indirect jump
314 * for the case where the key is not known. Latter is passed via global
315 * data to select different targets we can compare return value of.
316 */
317static void test_tailcall_4(void)
318{
319	int err, map_fd, prog_fd, main_fd, data_fd, i;
320	struct bpf_map *prog_array, *data_map;
321	struct bpf_program *prog;
322	struct bpf_object *obj;
323	static const int zero = 0;
324	char buff[128] = {};
325	char prog_name[32];
326	LIBBPF_OPTS(bpf_test_run_opts, topts,
327		.data_in = buff,
328		.data_size_in = sizeof(buff),
329		.repeat = 1,
330	);
331
332	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
333				 &prog_fd);
334	if (CHECK_FAIL(err))
335		return;
336
337	prog = bpf_object__find_program_by_name(obj, "entry");
338	if (CHECK_FAIL(!prog))
339		goto out;
340
341	main_fd = bpf_program__fd(prog);
342	if (CHECK_FAIL(main_fd < 0))
343		goto out;
344
345	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
346	if (CHECK_FAIL(!prog_array))
347		goto out;
348
349	map_fd = bpf_map__fd(prog_array);
350	if (CHECK_FAIL(map_fd < 0))
351		goto out;
352
353	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
354	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
355		return;
356
357	data_fd = bpf_map__fd(data_map);
358	if (CHECK_FAIL(map_fd < 0))
359		return;
360
361	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
362		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
363
364		prog = bpf_object__find_program_by_name(obj, prog_name);
365		if (CHECK_FAIL(!prog))
366			goto out;
367
368		prog_fd = bpf_program__fd(prog);
369		if (CHECK_FAIL(prog_fd < 0))
370			goto out;
371
372		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
373		if (CHECK_FAIL(err))
374			goto out;
375	}
376
377	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
378		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
379		if (CHECK_FAIL(err))
380			goto out;
381
382		err = bpf_prog_test_run_opts(main_fd, &topts);
383		ASSERT_OK(err, "tailcall");
384		ASSERT_EQ(topts.retval, i, "tailcall retval");
385	}
386
387	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
388		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
389		if (CHECK_FAIL(err))
390			goto out;
391
392		err = bpf_map_delete_elem(map_fd, &i);
393		if (CHECK_FAIL(err))
394			goto out;
395
396		err = bpf_prog_test_run_opts(main_fd, &topts);
397		ASSERT_OK(err, "tailcall");
398		ASSERT_EQ(topts.retval, 3, "tailcall retval");
399	}
400out:
401	bpf_object__close(obj);
402}
403
404/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates
405 * an indirect jump when the keys are const but different from different branches.
406 */
407static void test_tailcall_5(void)
408{
409	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
410	struct bpf_map *prog_array, *data_map;
411	struct bpf_program *prog;
412	struct bpf_object *obj;
413	static const int zero = 0;
414	char buff[128] = {};
415	char prog_name[32];
416	LIBBPF_OPTS(bpf_test_run_opts, topts,
417		.data_in = buff,
418		.data_size_in = sizeof(buff),
419		.repeat = 1,
420	);
421
422	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
423				 &prog_fd);
424	if (CHECK_FAIL(err))
425		return;
426
427	prog = bpf_object__find_program_by_name(obj, "entry");
428	if (CHECK_FAIL(!prog))
429		goto out;
430
431	main_fd = bpf_program__fd(prog);
432	if (CHECK_FAIL(main_fd < 0))
433		goto out;
434
435	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
436	if (CHECK_FAIL(!prog_array))
437		goto out;
438
439	map_fd = bpf_map__fd(prog_array);
440	if (CHECK_FAIL(map_fd < 0))
441		goto out;
442
443	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
444	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
445		return;
446
447	data_fd = bpf_map__fd(data_map);
448	if (CHECK_FAIL(map_fd < 0))
449		return;
450
451	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
452		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
453
454		prog = bpf_object__find_program_by_name(obj, prog_name);
455		if (CHECK_FAIL(!prog))
456			goto out;
457
458		prog_fd = bpf_program__fd(prog);
459		if (CHECK_FAIL(prog_fd < 0))
460			goto out;
461
462		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
463		if (CHECK_FAIL(err))
464			goto out;
465	}
466
467	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
468		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
469		if (CHECK_FAIL(err))
470			goto out;
471
472		err = bpf_prog_test_run_opts(main_fd, &topts);
473		ASSERT_OK(err, "tailcall");
474		ASSERT_EQ(topts.retval, i, "tailcall retval");
475	}
476
477	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
478		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
479		if (CHECK_FAIL(err))
480			goto out;
481
482		err = bpf_map_delete_elem(map_fd, &i);
483		if (CHECK_FAIL(err))
484			goto out;
485
486		err = bpf_prog_test_run_opts(main_fd, &topts);
487		ASSERT_OK(err, "tailcall");
488		ASSERT_EQ(topts.retval, 3, "tailcall retval");
489	}
490out:
491	bpf_object__close(obj);
492}
493
/* test_tailcall_bpf2bpf_1 purpose is to make sure that tailcalls are working
 * correctly in correlation with BPF subprograms
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	/* retval = twice the packet length, proving ctx access in subprog */
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}
576
577/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
578 * enforcement matches with expectations when tailcall is preceded with
579 * bpf2bpf call.
580 */
581static void test_tailcall_bpf2bpf_2(void)
582{
583	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
584	struct bpf_map *prog_array, *data_map;
585	struct bpf_program *prog;
586	struct bpf_object *obj;
587	char buff[128] = {};
588	LIBBPF_OPTS(bpf_test_run_opts, topts,
589		.data_in = buff,
590		.data_size_in = sizeof(buff),
591		.repeat = 1,
592	);
593
594	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
595				 &obj, &prog_fd);
596	if (CHECK_FAIL(err))
597		return;
598
599	prog = bpf_object__find_program_by_name(obj, "entry");
600	if (CHECK_FAIL(!prog))
601		goto out;
602
603	main_fd = bpf_program__fd(prog);
604	if (CHECK_FAIL(main_fd < 0))
605		goto out;
606
607	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
608	if (CHECK_FAIL(!prog_array))
609		goto out;
610
611	map_fd = bpf_map__fd(prog_array);
612	if (CHECK_FAIL(map_fd < 0))
613		goto out;
614
615	prog = bpf_object__find_program_by_name(obj, "classifier_0");
616	if (CHECK_FAIL(!prog))
617		goto out;
618
619	prog_fd = bpf_program__fd(prog);
620	if (CHECK_FAIL(prog_fd < 0))
621		goto out;
622
623	i = 0;
624	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
625	if (CHECK_FAIL(err))
626		goto out;
627
628	err = bpf_prog_test_run_opts(main_fd, &topts);
629	ASSERT_OK(err, "tailcall");
630	ASSERT_EQ(topts.retval, 1, "tailcall retval");
631
632	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
633	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
634		return;
635
636	data_fd = bpf_map__fd(data_map);
637	if (CHECK_FAIL(map_fd < 0))
638		return;
639
640	i = 0;
641	err = bpf_map_lookup_elem(data_fd, &i, &val);
642	ASSERT_OK(err, "tailcall count");
643	ASSERT_EQ(val, 33, "tailcall count");
644
645	i = 0;
646	err = bpf_map_delete_elem(map_fd, &i);
647	if (CHECK_FAIL(err))
648		goto out;
649
650	err = bpf_prog_test_run_opts(main_fd, &topts);
651	ASSERT_OK(err, "tailcall");
652	ASSERT_OK(topts.retval, "tailcall retval");
653out:
654	bpf_object__close(obj);
655}
656
/* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have the tailcalls
 * in them
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Install classifier_i into slot i. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Retvals are multiples of the packet length, so they also verify
	 * the subprogs still see ctx correctly across the tail calls.
	 */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}
735
736#include "tailcall_bpf2bpf4.skel.h"
737
738/* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
739 * across tailcalls combined with bpf2bpf calls. for making sure that tailcall
740 * counter behaves correctly, bpf program will go through following flow:
741 *
742 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
743 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
744 * subprog2 [here bump global counter] --------^
745 *
746 * We go through first two tailcalls and start counting from the subprog2 where
747 * the loop begins. At the end of the test make sure that the global counter is
748 * equal to 31, because tailcall counter includes the first two tailcalls
749 * whereas global counter is incremented only on loop presented on flow above.
750 *
751 * The noise parameter is used to insert bpf_map_update calls into the logic
752 * to force verifier to patch instructions. This allows us to ensure jump
753 * logic remains correct with instruction movement.
754 */
755static void test_tailcall_bpf2bpf_4(bool noise)
756{
757	int err, map_fd, prog_fd, main_fd, data_fd, i;
758	struct tailcall_bpf2bpf4__bss val;
759	struct bpf_map *prog_array, *data_map;
760	struct bpf_program *prog;
761	struct bpf_object *obj;
762	char prog_name[32];
763	LIBBPF_OPTS(bpf_test_run_opts, topts,
764		.data_in = &pkt_v4,
765		.data_size_in = sizeof(pkt_v4),
766		.repeat = 1,
767	);
768
769	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
770				 &obj, &prog_fd);
771	if (CHECK_FAIL(err))
772		return;
773
774	prog = bpf_object__find_program_by_name(obj, "entry");
775	if (CHECK_FAIL(!prog))
776		goto out;
777
778	main_fd = bpf_program__fd(prog);
779	if (CHECK_FAIL(main_fd < 0))
780		goto out;
781
782	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
783	if (CHECK_FAIL(!prog_array))
784		goto out;
785
786	map_fd = bpf_map__fd(prog_array);
787	if (CHECK_FAIL(map_fd < 0))
788		goto out;
789
790	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
791		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
792
793		prog = bpf_object__find_program_by_name(obj, prog_name);
794		if (CHECK_FAIL(!prog))
795			goto out;
796
797		prog_fd = bpf_program__fd(prog);
798		if (CHECK_FAIL(prog_fd < 0))
799			goto out;
800
801		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
802		if (CHECK_FAIL(err))
803			goto out;
804	}
805
806	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
807	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
808		return;
809
810	data_fd = bpf_map__fd(data_map);
811	if (CHECK_FAIL(map_fd < 0))
812		return;
813
814	i = 0;
815	val.noise = noise;
816	val.count = 0;
817	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
818	if (CHECK_FAIL(err))
819		goto out;
820
821	err = bpf_prog_test_run_opts(main_fd, &topts);
822	ASSERT_OK(err, "tailcall");
823	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");
824
825	i = 0;
826	err = bpf_map_lookup_elem(data_fd, &i, &val);
827	ASSERT_OK(err, "tailcall count");
828	ASSERT_EQ(val.count, 31, "tailcall count");
829
830out:
831	bpf_object__close(obj);
832}
833
834#include "tailcall_bpf2bpf6.skel.h"
835
836/* Tail call counting works even when there is data on stack which is
837 * not aligned to 8 bytes.
838 */
839static void test_tailcall_bpf2bpf_6(void)
840{
841	struct tailcall_bpf2bpf6 *obj;
842	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
843	LIBBPF_OPTS(bpf_test_run_opts, topts,
844		.data_in = &pkt_v4,
845		.data_size_in = sizeof(pkt_v4),
846		.repeat = 1,
847	);
848
849	obj = tailcall_bpf2bpf6__open_and_load();
850	if (!ASSERT_OK_PTR(obj, "open and load"))
851		return;
852
853	main_fd = bpf_program__fd(obj->progs.entry);
854	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
855		goto out;
856
857	map_fd = bpf_map__fd(obj->maps.jmp_table);
858	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
859		goto out;
860
861	prog_fd = bpf_program__fd(obj->progs.classifier_0);
862	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
863		goto out;
864
865	i = 0;
866	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
867	if (!ASSERT_OK(err, "jmp_table map update"))
868		goto out;
869
870	err = bpf_prog_test_run_opts(main_fd, &topts);
871	ASSERT_OK(err, "entry prog test run");
872	ASSERT_EQ(topts.retval, 0, "tailcall retval");
873
874	data_fd = bpf_map__fd(obj->maps.bss);
875	if (!ASSERT_GE(map_fd, 0, "bss map fd"))
876		goto out;
877
878	i = 0;
879	err = bpf_map_lookup_elem(data_fd, &i, &val);
880	ASSERT_OK(err, "bss map lookup");
881	ASSERT_EQ(val, 1, "done flag is set");
882
883out:
884	tailcall_bpf2bpf6__destroy(obj);
885}
886
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Top-level entry: dispatch each tailcall subtest; test__start_subtest()
 * lets the runner select/skip individual subtests by name.
 */
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	/* _5 is _4 with noise enabled (extra verifier instruction patching) */
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
}