// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_tasks.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vmas.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"
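
/* Subtests for BPF iterators (bpf_iter). Most subtests open a skeleton,
 * attach an iterator program, create an iterator fd with bpf_iter_create()
 * and drive the iteration with read(). Assuming the usual selftests setup,
 * this file is run through the test_progs binary, e.g.:
 *   ./test_progs -t bpf_iter
 */

/* bpf_iter_test_kern3 is expected to fail to load: ASSERT_ERR_PTR() below
 * checks for exactly that, and the destroy call only runs if loading
 * unexpectedly succeeded.
 */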
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}
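
/* Common helper: attach an iterator program, create an iterator fd from
 * the resulting link and read() it to completion. Most subtests only need
 * the iteration to finish without error.
 */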
static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* Don't check contents, but ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read");

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}
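
/* Verify that an already-created iterator fd stays usable after both the
 * iterator link and the map being iterated have been released: the
 * skeleton is destroyed below while iter_fd is still open.
 */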
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close link and map fd prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Try to let map free work run first if the map is freed */
	usleep(100);
	/* Memory used by both sock map and sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for it
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both map fd and link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}
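
/* Drain fd into buf (at most size bytes) until EOF; returns the number of
 * bytes read, or a negative value on read() error.
 */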
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_link_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

static pthread_mutex_t do_nothing_mutex;

static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}
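
/* Run the task iterator while an extra thread (do_nothing_wait) is kept
 * alive by holding do_nothing_mutex. The BPF program counts the thread
 * whose tid matches skel->bss->tid as "known" and every other task as
 * "unknown".
 */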
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_tasks *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_tasks__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = sys_gettid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_tasks__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}

static void *run_test_task_tid(void *arg)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = sys_gettid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	/* This includes the parent thread, this thread, the watchdog timer
	 * thread and the do_nothing_wait thread
	 */
	test_task_common(&opts, 3, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");

	return NULL;
}

static void test_task_tid(void)
{
	pthread_t thread_id;

	/* Create a new thread so pid and tid aren't the same */
	ASSERT_OK(pthread_create(&thread_id, NULL, &run_test_task_tid, NULL),
		  "pthread_create");
	ASSERT_FALSE(pthread_join(thread_id, NULL), "pthread_join");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 2, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 2, 1);

	close(pidfd);
}

static void test_task_sleepable(void)
{
	struct bpf_iter_tasks *skel;

	skel = bpf_iter_tasks__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_tasks__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");

	bpf_iter_task_stack__destroy(skel);
}

static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ		32768

static char taskbuf[TASKBUFSZ];
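
/* Dump the current task's task_struct via BTF into taskbuf and check that
 * the output looks like a BTF-printed struct. Returns nonzero when the
 * test is skipped because the compiler lacks __builtin_btf_type_id().
 */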
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (!ASSERT_GE(err, 0, "read"))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
	      "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (!ASSERT_LT(start, 16, "read"))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (!ASSERT_GE(len, 0, "read"))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (!ASSERT_GE(iter_fd, 0, "open"))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (!ASSERT_OK(err, "pin_iter"))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator works fine. Now do a link update on the
	 * underlying link and `cat` the iterator again; its contents
	 * should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}
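
/* Exercise seq_file buffer management in the kernel's bpf_seq_read():
 *  - test_e2big_overflow: each map's output is slightly larger than the
 *    8-page buffer, so one object can never fit and read() must fail
 *    with E2BIG;
 *  - !ret1: each map's output is slightly smaller than the buffer, so
 *    the second map overflows the first read() and forces a restart;
 *  - ret1: the iterator program returns 1, which the kernel treats as a
 *    request to retry the object, so the read loop below tolerates EAGAIN.
 * print_len is in 8-byte units. Worked example with 4kB pages: iter_size
 * is 32768 bytes, so the e2big case sets print_len = (32768 + 8) / 8 =
 * 4097, each map emits 32776 bytes, and expected_read_len = 2 * 32776.
 */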
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* Create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is that one map's output almost
	 * fills the seq_file buffer and the other then triggers overflow
	 * and needs a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (!ASSERT_GE(map1_fd, 0, "bpf_map_create"))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (!ASSERT_GE(map2_fd, 0, "bpf_map_create"))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
	 * bpf_seq_write will mostly fill it, and the other map will
	 * partially fill it, trigger overflow and need a bpf_seq_read
	 * restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (!ASSERT_OK(err, "get_map_info"))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (!ASSERT_OK(err, "get_map_info"))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!ASSERT_OK_PTR(buf, "malloc"))
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		ASSERT_EQ(len, -1, "read");
		ASSERT_EQ(errno, E2BIG, "read");
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (!ASSERT_GE(len, 0, "read"))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		/* read() reports EAGAIN as -1 with errno set, so check
		 * errno rather than the return value itself
		 */
		} while (len > 0 || (len < 0 && errno == EAGAIN));

		if (!ASSERT_GE(len, 0, "read"))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}
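
/* Attach a hash map iterator to hashmap1 and verify the key/value sums
 * seen by the BPF program. Attaching to hashmap2 and hashmap3 must fail,
 * presumably because their key/value layouts don't match what
 * dump_bpf_hash_map declares.
 */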
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable programs are prohibited for hash map iterators */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}
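
/* Same as test_bpf_hash_map() but for a per-cpu hash map: from userspace
 * the value is an array of 8-byte slots, one per possible CPU, hence the
 * malloc(8 * bpf_num_possible_cpus()) lookup buffer below.
 */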
static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 key, expected_key = 0, res_first_key;
	int err, i, map_fd, hash_fd, iter_fd;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (!ASSERT_EQ(res_first_key, 0, "bpf_seq_write") ||
	    !ASSERT_EQ(res_first_val, first_val, "bpf_seq_write"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	hash_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup arraymap1"))
			goto close_iter;
		if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
			goto close_iter;

		val = i + 4;
		err = bpf_map_lookup_elem(hash_fd, &val, &key);
		if (!ASSERT_OK(err, "map_lookup hashmap1"))
			goto close_iter;
		if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);

	/* The following assertions check that the value was deleted: they
	 * expect bpf_map_lookup_elem() to fail, which may seem
	 * counterintuitive at first.
	 */
	ASSERT_ERR(err, "bpf_map_lookup_elem");
	ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem");

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter
 * BPF program that replaces the existing socket local storage with the
 * tgid of the only task owning a file descriptor to this socket: this
 * process, prog_tests. It then runs a tcp socket iterator that negates
 * the value in the existing socket local storage, and the test verifies
 * that the resulting value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
	    !ASSERT_EQ(val, getpid(), "bpf_map_lookup_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	ASSERT_OK(err, "bpf_map_lookup_elem");
	ASSERT_EQ(val, -getpid(), "bpf_map_lookup_elem");

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}
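
/* The task_vma subtests compare bpf_iter output against /proc/<pid>/maps.
 * Both are read into 1kB buffers and reduced to their first line, with
 * whitespace removed, before comparison.
 */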
#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vmas *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vmas__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vmas__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vmas__destroy(skel);
}
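
/* Race the task_vma iterator against short-lived tasks: a forked child
 * keeps spawning processes via system() while the parent repeatedly
 * drains the iterator, so iteration keeps encountering exiting tasks.
 */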
static void test_task_vma_dead_task(void)
{
	struct bpf_iter_task_vmas *skel;
	int wstatus, child_pid = -1;
	time_t start_tm, cur_tm;
	int err, iter_fd = -1;
	int wait_sec = 3;

	skel = bpf_iter_task_vmas__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
		return;

	skel->bss->pid = getpid();

	err = bpf_iter_task_vmas__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	start_tm = time(NULL);
	cur_tm = start_tm;

	child_pid = fork();
	if (child_pid == 0) {
		/* Fork short-lived processes in the background. */
		while (cur_tm < start_tm + wait_sec) {
			system("echo > /dev/null");
			cur_tm = time(NULL);
		}
		exit(0);
	}

	if (!ASSERT_GE(child_pid, 0, "fork_child"))
		goto out;

	while (cur_tm < start_tm + wait_sec) {
		iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
		if (!ASSERT_GE(iter_fd, 0, "create_iter"))
			goto out;

		/* Drain all data from iter_fd. */
		while (cur_tm < start_tm + wait_sec) {
			err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
			if (!ASSERT_GE(err, 0, "read_iter_fd"))
				goto out;

			cur_tm = time(NULL);

			if (err == 0)
				break;
		}

		close(iter_fd);
		iter_fd = -1;
	}

	check_bpf_link_info(skel->progs.proc_maps);

out:
	waitpid(child_pid, &wstatus, 0);
	close(iter_fd);
	bpf_iter_task_vmas__destroy(skel);
}

void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}
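
/* Check that the vma iterator can translate trigger_func's address into
 * a file offset matching get_uprobe_offset(). page_shift below is simply
 * log2 of the page size.
 */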
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
		goto exit;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_iter_vma_offset__destroy(skel);
}

static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_vma_dead_task"))
		test_task_vma_dead_task();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2020 Facebook */
   3#include <test_progs.h>
   4#include <unistd.h>
   5#include <sys/syscall.h>
   6#include <task_local_storage_helpers.h>
   7#include "bpf_iter_ipv6_route.skel.h"
   8#include "bpf_iter_netlink.skel.h"
   9#include "bpf_iter_bpf_map.skel.h"
  10#include "bpf_iter_task.skel.h"
  11#include "bpf_iter_task_stack.skel.h"
  12#include "bpf_iter_task_file.skel.h"
  13#include "bpf_iter_task_vma.skel.h"
  14#include "bpf_iter_task_btf.skel.h"
  15#include "bpf_iter_tcp4.skel.h"
  16#include "bpf_iter_tcp6.skel.h"
  17#include "bpf_iter_udp4.skel.h"
  18#include "bpf_iter_udp6.skel.h"
  19#include "bpf_iter_unix.skel.h"
  20#include "bpf_iter_vma_offset.skel.h"
  21#include "bpf_iter_test_kern1.skel.h"
  22#include "bpf_iter_test_kern2.skel.h"
  23#include "bpf_iter_test_kern3.skel.h"
  24#include "bpf_iter_test_kern4.skel.h"
  25#include "bpf_iter_bpf_hash_map.skel.h"
  26#include "bpf_iter_bpf_percpu_hash_map.skel.h"
  27#include "bpf_iter_bpf_array_map.skel.h"
  28#include "bpf_iter_bpf_percpu_array_map.skel.h"
  29#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
  30#include "bpf_iter_bpf_sk_storage_map.skel.h"
  31#include "bpf_iter_test_kern5.skel.h"
  32#include "bpf_iter_test_kern6.skel.h"
  33#include "bpf_iter_bpf_link.skel.h"
  34#include "bpf_iter_ksym.skel.h"
  35#include "bpf_iter_sockmap.skel.h"
  36
  37static int duration;
  38
  39static void test_btf_id_or_null(void)
  40{
  41	struct bpf_iter_test_kern3 *skel;
  42
  43	skel = bpf_iter_test_kern3__open_and_load();
  44	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
  45		bpf_iter_test_kern3__destroy(skel);
  46		return;
  47	}
  48}
  49
  50static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
  51{
  52	struct bpf_link *link;
  53	char buf[16] = {};
  54	int iter_fd, len;
  55
  56	link = bpf_program__attach_iter(prog, opts);
  57	if (!ASSERT_OK_PTR(link, "attach_iter"))
  58		return;
  59
  60	iter_fd = bpf_iter_create(bpf_link__fd(link));
  61	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
  62		goto free_link;
  63
  64	/* not check contents, but ensure read() ends without error */
  65	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
  66		;
  67	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
  68
  69	close(iter_fd);
  70
  71free_link:
  72	bpf_link__destroy(link);
  73}
  74
  75static void do_dummy_read(struct bpf_program *prog)
  76{
  77	do_dummy_read_opts(prog, NULL);
  78}
  79
  80static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
  81				struct bpf_map *map)
  82{
  83	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
  84	union bpf_iter_link_info linfo;
  85	struct bpf_link *link;
  86	char buf[16] = {};
  87	int iter_fd, len;
  88
  89	memset(&linfo, 0, sizeof(linfo));
  90	linfo.map.map_fd = bpf_map__fd(map);
  91	opts.link_info = &linfo;
  92	opts.link_info_len = sizeof(linfo);
  93	link = bpf_program__attach_iter(prog, &opts);
  94	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
  95		return;
  96
  97	iter_fd = bpf_iter_create(bpf_link__fd(link));
  98	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
  99		bpf_link__destroy(link);
 100		return;
 101	}
 102
 103	/* Close link and map fd prematurely */
 104	bpf_link__destroy(link);
 105	bpf_object__destroy_skeleton(*skel);
 106	*skel = NULL;
 107
 108	/* Try to let map free work to run first if map is freed */
 109	usleep(100);
 110	/* Memory used by both sock map and sock local storage map are
 111	 * freed after two synchronize_rcu() calls, so wait for it
 112	 */
 113	kern_sync_rcu();
 114	kern_sync_rcu();
 115
 116	/* Read after both map fd and link fd are closed */
 117	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
 118		;
 119	ASSERT_GE(len, 0, "read_iterator");
 120
 121	close(iter_fd);
 122}
 123
 124static int read_fd_into_buffer(int fd, char *buf, int size)
 125{
 126	int bufleft = size;
 127	int len;
 128
 129	do {
 130		len = read(fd, buf, bufleft);
 131		if (len > 0) {
 132			buf += len;
 133			bufleft -= len;
 134		}
 135	} while (len > 0);
 136
 137	return len < 0 ? len : size - bufleft;
 138}
 139
 140static void test_ipv6_route(void)
 141{
 142	struct bpf_iter_ipv6_route *skel;
 143
 144	skel = bpf_iter_ipv6_route__open_and_load();
 145	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
 146		return;
 147
 148	do_dummy_read(skel->progs.dump_ipv6_route);
 149
 150	bpf_iter_ipv6_route__destroy(skel);
 151}
 152
 153static void test_netlink(void)
 154{
 155	struct bpf_iter_netlink *skel;
 156
 157	skel = bpf_iter_netlink__open_and_load();
 158	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
 159		return;
 160
 161	do_dummy_read(skel->progs.dump_netlink);
 162
 163	bpf_iter_netlink__destroy(skel);
 164}
 165
 166static void test_bpf_map(void)
 167{
 168	struct bpf_iter_bpf_map *skel;
 169
 170	skel = bpf_iter_bpf_map__open_and_load();
 171	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
 172		return;
 173
 174	do_dummy_read(skel->progs.dump_bpf_map);
 175
 176	bpf_iter_bpf_map__destroy(skel);
 177}
 178
 179static void check_bpf_link_info(const struct bpf_program *prog)
 180{
 181	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 182	union bpf_iter_link_info linfo;
 183	struct bpf_link_info info = {};
 184	struct bpf_link *link;
 185	__u32 info_len;
 186	int err;
 187
 188	memset(&linfo, 0, sizeof(linfo));
 189	linfo.task.tid = getpid();
 190	opts.link_info = &linfo;
 191	opts.link_info_len = sizeof(linfo);
 192
 193	link = bpf_program__attach_iter(prog, &opts);
 194	if (!ASSERT_OK_PTR(link, "attach_iter"))
 195		return;
 196
 197	info_len = sizeof(info);
 198	err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
 199	ASSERT_OK(err, "bpf_obj_get_info_by_fd");
 200	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
 201
 202	bpf_link__destroy(link);
 203}
 204
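/* do_nothing_mutex parks a helper thread: the test takes the mutex, spawns
 * do_nothing_wait(), reads the task iterator while the thread is blocked,
 * then unlocks and joins, keeping the thread count stable while iterating.
 */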
 205static pthread_mutex_t do_nothing_mutex;
 206
 207static void *do_nothing_wait(void *arg)
 208{
 209	pthread_mutex_lock(&do_nothing_mutex);
 210	pthread_mutex_unlock(&do_nothing_mutex);
 211
 212	pthread_exit(arg);
 213}
 214
 215static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
 216				     int *num_unknown, int *num_known)
 217{
 218	struct bpf_iter_tasks *skel;
 219	pthread_t thread_id;
 220	void *ret;
 221
 222	skel = bpf_iter_tasks__open_and_load();
 223	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
 224		return;
 225
 226	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
 227
 228	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
 229		  "pthread_create");
 230
 231	skel->bss->tid = getpid();
 232
 233	do_dummy_read_opts(skel->progs.dump_task, opts);
 234
 235	*num_unknown = skel->bss->num_unknown_tid;
 236	*num_known = skel->bss->num_known_tid;
 237
 238	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
 239	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
 240		     "pthread_join");
 241
 242	bpf_iter_tasks__destroy(skel);
 243}
 244
 245static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
 246{
 247	int num_unknown_tid, num_known_tid;
 248
 249	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
 250	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
 251	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
 252}
 253
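/* Task iterators can be scoped through bpf_iter_link_info: task.tid visits
 * one thread, task.pid visits every thread of a thread group, and
 * task.pid_fd names the thread group via a pidfd. With a tid filter only
 * the main thread is seen (0 unknown, 1 known tid); with a pid filter the
 * parked do_nothing_wait() thread shows up as well (1 unknown, 1 known).
 */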
 254static void test_task_tid(void)
 255{
 256	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 257	union bpf_iter_link_info linfo;
 258	int num_unknown_tid, num_known_tid;
 259
 260	memset(&linfo, 0, sizeof(linfo));
 261	linfo.task.tid = getpid();
 262	opts.link_info = &linfo;
 263	opts.link_info_len = sizeof(linfo);
 264	test_task_common(&opts, 0, 1);
 265
 266	linfo.task.tid = 0;
 267	linfo.task.pid = getpid();
 268	test_task_common(&opts, 1, 1);
 269
 270	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
 271	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
 272	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
 273}
 274
 275static void test_task_pid(void)
 276{
 277	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 278	union bpf_iter_link_info linfo;
 279
 280	memset(&linfo, 0, sizeof(linfo));
 281	linfo.task.pid = getpid();
 282	opts.link_info = &linfo;
 283	opts.link_info_len = sizeof(linfo);
 284
 285	test_task_common(&opts, 1, 1);
 286}
 287
 288static void test_task_pidfd(void)
 289{
 290	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 291	union bpf_iter_link_info linfo;
 292	int pidfd;
 293
 294	pidfd = sys_pidfd_open(getpid(), 0);
 295	if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
 296		return;
 297
 298	memset(&linfo, 0, sizeof(linfo));
 299	linfo.task.pid_fd = pidfd;
 300	opts.link_info = &linfo;
 301	opts.link_info_len = sizeof(linfo);
 302
 303	test_task_common(&opts, 1, 1);
 304
 305	close(pidfd);
 306}
 307
 308static void test_task_sleepable(void)
 309{
 310	struct bpf_iter_tasks *skel;
 311
 312	skel = bpf_iter_tasks__open_and_load();
 313	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
 314		return;
 315
 316	do_dummy_read(skel->progs.dump_task_sleepable);
 317
 318	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
 319		  "num_expected_failure_copy_from_user_task");
 320	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
 321		  "num_success_copy_from_user_task");
 322
 323	bpf_iter_tasks__destroy(skel);
 324}
 325
 326static void test_task_stack(void)
 327{
 328	struct bpf_iter_task_stack *skel;
 329
 330	skel = bpf_iter_task_stack__open_and_load();
 331	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
 332		return;
 333
 334	do_dummy_read(skel->progs.dump_task_stack);
 335	do_dummy_read(skel->progs.get_task_user_stacks);
 336
 337	bpf_iter_task_stack__destroy(skel);
 338}
 339
 340static void test_task_file(void)
 341{
 342	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 343	struct bpf_iter_task_file *skel;
 344	union bpf_iter_link_info linfo;
 345	pthread_t thread_id;
 346	void *ret;
 347
 348	skel = bpf_iter_task_file__open_and_load();
 349	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
 350		return;
 351
 352	skel->bss->tgid = getpid();
 353
 354	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
 355
 356	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
 357		  "pthread_create");
 358
 359	memset(&linfo, 0, sizeof(linfo));
 360	linfo.task.tid = getpid();
 361	opts.link_info = &linfo;
 362	opts.link_info_len = sizeof(linfo);
 363
 364	do_dummy_read_opts(skel->progs.dump_task_file, &opts);
 365
 366	ASSERT_EQ(skel->bss->count, 0, "check_count");
 367	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
 368
 369	skel->bss->last_tgid = 0;
 370	skel->bss->count = 0;
 371	skel->bss->unique_tgid_count = 0;
 372
 373	do_dummy_read(skel->progs.dump_task_file);
 374
 375	ASSERT_EQ(skel->bss->count, 0, "check_count");
 376	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
 377
 378	check_bpf_link_info(skel->progs.dump_task_file);
 379
 380	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
 381	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
 382	ASSERT_NULL(ret, "pthread_join");
 383
 384	bpf_iter_task_file__destroy(skel);
 385}
 386
 387#define TASKBUFSZ		32768
 388
 389static char taskbuf[TASKBUFSZ];
 390
 391static int do_btf_read(struct bpf_iter_task_btf *skel)
 392{
 393	struct bpf_program *prog = skel->progs.dump_task_struct;
 394	struct bpf_iter_task_btf__bss *bss = skel->bss;
 395	int iter_fd = -1, err;
 396	struct bpf_link *link;
 397	char *buf = taskbuf;
 398	int ret = 0;
 399
 400	link = bpf_program__attach_iter(prog, NULL);
 401	if (!ASSERT_OK_PTR(link, "attach_iter"))
 402		return ret;
 403
 404	iter_fd = bpf_iter_create(bpf_link__fd(link));
 405	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
 406		goto free_link;
 407
 408	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
 409	if (bss->skip) {
 410		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
 411		ret = 1;
 412		test__skip();
 413		goto free_link;
 414	}
 415
 416	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
 417		goto free_link;
 418
 419	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
 420	      "check for btf representation of task_struct in iter data");
 421free_link:
 422	if (iter_fd > 0)
 423		close(iter_fd);
 424	bpf_link__destroy(link);
 425	return ret;
 426}
 427
 428static void test_task_btf(void)
 429{
 430	struct bpf_iter_task_btf__bss *bss;
 431	struct bpf_iter_task_btf *skel;
 432	int ret;
 433
 434	skel = bpf_iter_task_btf__open_and_load();
 435	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
 436		return;
 437
 438	bss = skel->bss;
 439
 440	ret = do_btf_read(skel);
 441	if (ret)
 442		goto cleanup;
 443
 444	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
 445		goto cleanup;
 446
 447	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");
 448
 449cleanup:
 450	bpf_iter_task_btf__destroy(skel);
 451}
 452
 453static void test_tcp4(void)
 454{
 455	struct bpf_iter_tcp4 *skel;
 456
 457	skel = bpf_iter_tcp4__open_and_load();
 458	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
 459		return;
 460
 461	do_dummy_read(skel->progs.dump_tcp4);
 462
 463	bpf_iter_tcp4__destroy(skel);
 464}
 465
 466static void test_tcp6(void)
 467{
 468	struct bpf_iter_tcp6 *skel;
 469
 470	skel = bpf_iter_tcp6__open_and_load();
 471	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
 472		return;
 473
 474	do_dummy_read(skel->progs.dump_tcp6);
 475
 476	bpf_iter_tcp6__destroy(skel);
 477}
 478
 479static void test_udp4(void)
 480{
 481	struct bpf_iter_udp4 *skel;
 482
 483	skel = bpf_iter_udp4__open_and_load();
 484	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
 485		return;
 486
 487	do_dummy_read(skel->progs.dump_udp4);
 488
 489	bpf_iter_udp4__destroy(skel);
 490}
 491
 492static void test_udp6(void)
 493{
 494	struct bpf_iter_udp6 *skel;
 495
 496	skel = bpf_iter_udp6__open_and_load();
 497	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
 498		return;
 499
 500	do_dummy_read(skel->progs.dump_udp6);
 501
 502	bpf_iter_udp6__destroy(skel);
 503}
 504
 505static void test_unix(void)
 506{
 507	struct bpf_iter_unix *skel;
 508
 509	skel = bpf_iter_unix__open_and_load();
 510	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
 511		return;
 512
 513	do_dummy_read(skel->progs.dump_unix);
 514
 515	bpf_iter_unix__destroy(skel);
 516}
 517
 518/* The expected string is less than 16 bytes */
 519static int do_read_with_fd(int iter_fd, const char *expected,
 520			   bool read_one_char)
 521{
 522	int len, read_buf_len, start;
 523	char buf[16] = {};
 524
 525	read_buf_len = read_one_char ? 1 : 16;
 526	start = 0;
 527	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
 528		start += len;
 529		if (CHECK(start >= 16, "read", "read len %d\n", len))
 530			return -1;
 531		read_buf_len = read_one_char ? 1 : 16 - start;
 532	}
 533	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
 534		return -1;
 535
 536	if (!ASSERT_STREQ(buf, expected, "read"))
 537		return -1;
 538
 539	return 0;
 540}
 541
 542static void test_anon_iter(bool read_one_char)
 543{
 544	struct bpf_iter_test_kern1 *skel;
 545	struct bpf_link *link;
 546	int iter_fd, err;
 547
 548	skel = bpf_iter_test_kern1__open_and_load();
 549	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
 550		return;
 551
 552	err = bpf_iter_test_kern1__attach(skel);
 553	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
 554		goto out;
 556
 557	link = skel->links.dump_task;
 558	iter_fd = bpf_iter_create(bpf_link__fd(link));
 559	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
 560		goto out;
 561
 562	do_read_with_fd(iter_fd, "abcd", read_one_char);
 563	close(iter_fd);
 564
 565out:
 566	bpf_iter_test_kern1__destroy(skel);
 567}
 568
 569static int do_read(const char *path, const char *expected)
 570{
 571	int err, iter_fd;
 572
 573	iter_fd = open(path, O_RDONLY);
 574	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
 575		  path, strerror(errno)))
 576		return -1;
 577
 578	err = do_read_with_fd(iter_fd, expected, false);
 579	close(iter_fd);
 580	return err;
 581}
 582
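/* Besides anonymous iterator fds, an iterator link can be pinned:
 * bpf_link__pin() exposes it at a bpffs path, a plain open()/read() on
 * that path (e.g. `cat`) re-runs it, and bpf_link__update_program() swaps
 * the program behind the same pin.
 */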
 583static void test_file_iter(void)
 584{
 585	const char *path = "/sys/fs/bpf/bpf_iter_test1";
 586	struct bpf_iter_test_kern1 *skel1;
 587	struct bpf_iter_test_kern2 *skel2;
 588	struct bpf_link *link;
 589	int err;
 590
 591	skel1 = bpf_iter_test_kern1__open_and_load();
 592	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
 593		return;
 594
 595	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
 596	if (!ASSERT_OK_PTR(link, "attach_iter"))
 597		goto out;
 598
 599	/* unlink this path if it exists. */
 600	unlink(path);
 601
 602	err = bpf_link__pin(link, path);
 603	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
 604		goto free_link;
 605
 606	err = do_read(path, "abcd");
 607	if (err)
 608		goto unlink_path;
 609
 610	/* The file based iterator seems to be working fine. Let us do a link
 611	 * update of the underlying link and `cat` the iterator again; its
 612	 * content should change.
 613	 */
 614	skel2 = bpf_iter_test_kern2__open_and_load();
 615	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
 616		goto unlink_path;
 617
 618	err = bpf_link__update_program(link, skel2->progs.dump_task);
 619	if (!ASSERT_OK(err, "update_prog"))
 620		goto destroy_skel2;
 621
 622	do_read(path, "ABCD");
 623
 624destroy_skel2:
 625	bpf_iter_test_kern2__destroy(skel2);
 626unlink_path:
 627	unlink(path);
 628free_link:
 629	bpf_link__destroy(link);
 630out:
 631	bpf_iter_test_kern1__destroy(skel1);
 632}
 633
 634static void test_overflow(bool test_e2big_overflow, bool ret1)
 635{
 636	__u32 map_info_len, total_read_len, expected_read_len;
 637	int err, iter_fd, map1_fd, map2_fd, len;
 638	struct bpf_map_info map_info = {};
 639	struct bpf_iter_test_kern4 *skel;
 640	struct bpf_link *link;
 641	__u32 iter_size;
 642	char *buf;
 643
 644	skel = bpf_iter_test_kern4__open();
 645	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
 646		return;
 647
 648	/* create two maps: the bpf program will only do bpf_seq_write
 649	 * for these two maps. The goal is that one map's output almost
 650	 * fills the seq_file buffer and the other then triggers an
 651	 * overflow and needs a restart.
 652	 */
 653	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
 654	if (CHECK(map1_fd < 0, "bpf_map_create",
 655		  "map_creation failed: %s\n", strerror(errno)))
 656		goto out;
 657	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
 658	if (CHECK(map2_fd < 0, "bpf_map_create",
 659		  "map_creation failed: %s\n", strerror(errno)))
 660		goto free_map1;
 661
 662	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
 663	 * bpf_seq_write output will mostly fill it, and the other map's
 664	 * will partially fill it, then trigger an overflow and need a
 665	 * bpf_seq_read restart.
 666	 */
 667	iter_size = sysconf(_SC_PAGE_SIZE) << 3;
 668
 669	if (test_e2big_overflow) {
 670		skel->rodata->print_len = (iter_size + 8) / 8;
 671		expected_read_len = 2 * (iter_size + 8);
 672	} else if (!ret1) {
 673		skel->rodata->print_len = (iter_size - 8) / 8;
 674		expected_read_len = 2 * (iter_size - 8);
 675	} else {
 676		skel->rodata->print_len = 1;
 677		expected_read_len = 2 * 8;
 678	}
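	/* Worked example, assuming dump_bpf_map in bpf_iter_test_kern4
	 * emits print_len u64 values per matched map: with 4KB pages,
	 * iter_size is 32KB. In the e2big case each map tries to write
	 * iter_size + 8 bytes, which can never fit, so read() must fail
	 * with E2BIG. In the !ret1 case each map writes iter_size - 8
	 * bytes, so the second map overflows the buffer once and gets
	 * restarted, which is why map2_accessed is expected to be 2.
	 */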
 679	skel->rodata->ret1 = ret1;
 680
 681	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
 682		  "bpf_iter_test_kern4__load"))
 683		goto free_map2;
 684
 685	/* setup filtering map_id in bpf program */
 686	map_info_len = sizeof(map_info);
 687	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
 688	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
 689		  strerror(errno)))
 690		goto free_map2;
 691	skel->bss->map1_id = map_info.id;
 692
 693	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
 694	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
 695		  strerror(errno)))
 696		goto free_map2;
 697	skel->bss->map2_id = map_info.id;
 698
 699	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
 700	if (!ASSERT_OK_PTR(link, "attach_iter"))
 701		goto free_map2;
 702
 703	iter_fd = bpf_iter_create(bpf_link__fd(link));
 704	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
 705		goto free_link;
 706
 707	buf = malloc(expected_read_len);
 708	if (!buf)
 709		goto close_iter;
 710
 711	/* do read */
 712	total_read_len = 0;
 713	if (test_e2big_overflow) {
 714		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
 715			total_read_len += len;
 716
 717		CHECK(len != -1 || errno != E2BIG, "read",
 718		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
 719		      len, strerror(errno));
 720		goto free_buf;
 721	} else if (!ret1) {
 722		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
 723			total_read_len += len;
 724
 725		if (CHECK(len < 0, "read", "read failed: %s\n",
 726			  strerror(errno)))
 727			goto free_buf;
 728	} else {
 729		do {
 730			len = read(iter_fd, buf, expected_read_len);
 731			if (len > 0)
 732				total_read_len += len;
 733		} while (len > 0 || (len == -1 && errno == EAGAIN));
 734
 735		if (CHECK(len < 0, "read", "read failed: %s\n",
 736			  strerror(errno)))
 737			goto free_buf;
 738	}
 739
 740	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
 741		goto free_buf;
 742
 743	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
 744		goto free_buf;
 745
 746	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
 747		goto free_buf;
 748
 749	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");
 750
 751free_buf:
 752	free(buf);
 753close_iter:
 754	close(iter_fd);
 755free_link:
 756	bpf_link__destroy(link);
 757free_map2:
 758	close(map2_fd);
 759free_map1:
 760	close(map1_fd);
 761out:
 762	bpf_iter_test_kern4__destroy(skel);
 763}
 764
 765static void test_bpf_hash_map(void)
 766{
 767	__u32 expected_key_a = 0, expected_key_b = 0;
 768	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 769	struct bpf_iter_bpf_hash_map *skel;
 770	int err, i, len, map_fd, iter_fd;
 771	union bpf_iter_link_info linfo;
 772	__u64 val, expected_val = 0;
 773	struct bpf_link *link;
 774	struct key_t {
 775		int a;
 776		int b;
 777		int c;
 778	} key;
 779	char buf[64];
 780
 781	skel = bpf_iter_bpf_hash_map__open();
 782	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
 783		return;
 784
 785	skel->bss->in_test_mode = true;
 786
 787	err = bpf_iter_bpf_hash_map__load(skel);
 788	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
 789		goto out;
 790
 791	/* iterator with hashmap2 and hashmap3 should fail */
 792	memset(&linfo, 0, sizeof(linfo));
 793	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
 794	opts.link_info = &linfo;
 795	opts.link_info_len = sizeof(linfo);
 796	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
 797	if (!ASSERT_ERR_PTR(link, "attach_iter"))
 798		goto out;
 799
 800	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
 801	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
 802	if (!ASSERT_ERR_PTR(link, "attach_iter"))
 803		goto out;
 804
 805	/* hashmap1 should be good, update map values here */
 806	map_fd = bpf_map__fd(skel->maps.hashmap1);
 807	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
 808		key.a = i + 1;
 809		key.b = i + 2;
 810		key.c = i + 3;
 811		val = i + 4;
 812		expected_key_a += key.a;
 813		expected_key_b += key.b;
 814		expected_val += val;
 815
 816		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 817		if (!ASSERT_OK(err, "map_update"))
 818			goto out;
 819	}
 820
 821	/* Sleepable program is prohibited for hash map iterator */
 822	linfo.map.map_fd = map_fd;
 823	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
 824	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
 825		goto out;
 826
 827	linfo.map.map_fd = map_fd;
 828	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
 829	if (!ASSERT_OK_PTR(link, "attach_iter"))
 830		goto out;
 831
 832	iter_fd = bpf_iter_create(bpf_link__fd(link));
 833	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
 834		goto free_link;
 835
 836	/* do some tests */
 837	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
 838		;
 839	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
 840		goto close_iter;
 841
 842	/* test results */
 843	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
 844		goto close_iter;
 845	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
 846		goto close_iter;
 847	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
 848		goto close_iter;
 849
 850close_iter:
 851	close(iter_fd);
 852free_link:
 853	bpf_link__destroy(link);
 854out:
 855	bpf_iter_bpf_hash_map__destroy(skel);
 856}
 857
 858static void test_bpf_percpu_hash_map(void)
 859{
 860	__u32 expected_key_a = 0, expected_key_b = 0;
 861	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 862	struct bpf_iter_bpf_percpu_hash_map *skel;
 863	int err, i, j, len, map_fd, iter_fd;
 864	union bpf_iter_link_info linfo;
 865	__u32 expected_val = 0;
 866	struct bpf_link *link;
 867	struct key_t {
 868		int a;
 869		int b;
 870		int c;
 871	} key;
 872	char buf[64];
 873	void *val;
 874
 875	skel = bpf_iter_bpf_percpu_hash_map__open();
 876	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
 877		return;
 878
 879	skel->rodata->num_cpus = bpf_num_possible_cpus();
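	/* Per-CPU map values travel to/from user space as an array with one
	 * slot per possible CPU, each slot padded to 8 bytes -- hence the
	 * 8 bytes per CPU here even though the value itself is a u32.
	 */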
 880	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;
 881
 882	err = bpf_iter_bpf_percpu_hash_map__load(skel);
 883	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
 884		goto out;
 885
 886	/* update map values here */
 887	map_fd = bpf_map__fd(skel->maps.hashmap1);
 888	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
 889		key.a = i + 1;
 890		key.b = i + 2;
 891		key.c = i + 3;
 892		expected_key_a += key.a;
 893		expected_key_b += key.b;
 894
 895		for (j = 0; j < bpf_num_possible_cpus(); j++) {
 896			*(__u32 *)(val + j * 8) = i + j;
 897			expected_val += i + j;
 898		}
 899
 900		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
 901		if (!ASSERT_OK(err, "map_update"))
 902			goto out;
 903	}
 904
 905	memset(&linfo, 0, sizeof(linfo));
 906	linfo.map.map_fd = map_fd;
 907	opts.link_info = &linfo;
 908	opts.link_info_len = sizeof(linfo);
 909	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
 910	if (!ASSERT_OK_PTR(link, "attach_iter"))
 911		goto out;
 912
 913	iter_fd = bpf_iter_create(bpf_link__fd(link));
 914	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
 915		goto free_link;
 916
 917	/* do some tests */
 918	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
 919		;
 920	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
 921		goto close_iter;
 922
 923	/* test results */
 924	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
 925		goto close_iter;
 926	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
 927		goto close_iter;
 928	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
 929		goto close_iter;
 930
 931close_iter:
 932	close(iter_fd);
 933free_link:
 934	bpf_link__destroy(link);
 935out:
 936	bpf_iter_bpf_percpu_hash_map__destroy(skel);
 937	free(val);
 938}
 939
 940static void test_bpf_array_map(void)
 941{
 942	__u64 val, expected_val = 0, res_first_val, first_val = 0;
 943	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 944	__u32 key, expected_key = 0, res_first_key;
 945	int err, i, map_fd, hash_fd, iter_fd;
 946	struct bpf_iter_bpf_array_map *skel;
 947	union bpf_iter_link_info linfo;
 948	struct bpf_link *link;
 949	char buf[64] = {};
 950	int len, start;
 951
 952	skel = bpf_iter_bpf_array_map__open_and_load();
 953	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
 954		return;
 955
 956	map_fd = bpf_map__fd(skel->maps.arraymap1);
 957	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
 958		val = i + 4;
 959		expected_key += i;
 960		expected_val += val;
 961
 962		if (i == 0)
 963			first_val = val;
 964
 965		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
 966		if (!ASSERT_OK(err, "map_update"))
 967			goto out;
 968	}
 969
 970	memset(&linfo, 0, sizeof(linfo));
 971	linfo.map.map_fd = map_fd;
 972	opts.link_info = &linfo;
 973	opts.link_info_len = sizeof(linfo);
 974	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
 975	if (!ASSERT_OK_PTR(link, "attach_iter"))
 976		goto out;
 977
 978	iter_fd = bpf_iter_create(bpf_link__fd(link));
 979	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
 980		goto free_link;
 981
 982	/* do some tests */
 983	start = 0;
 984	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
 985		start += len;
 986	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
 987		goto close_iter;
 988
 989	/* test results */
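	/* dump_bpf_array_map is expected to bpf_seq_write() the raw 4-byte
	 * key followed by the 8-byte value of the first element, so pull
	 * both back out of the byte stream.
	 */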
 990	res_first_key = *(__u32 *)buf;
 991	res_first_val = *(__u64 *)(buf + sizeof(__u32));
 992	if (CHECK(res_first_key != 0 || res_first_val != first_val,
 993		  "bpf_seq_write",
 994		  "seq_write failure: first key %u vs expected 0, "
 995		  " first value %llu vs expected %llu\n",
 996		  res_first_key, res_first_val, first_val))
 997		goto close_iter;
 998
 999	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
1000		goto close_iter;
1001	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
1002		goto close_iter;
1003
1004	hash_fd = bpf_map__fd(skel->maps.hashmap1);
1005	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
1006		err = bpf_map_lookup_elem(map_fd, &i, &val);
1007		if (!ASSERT_OK(err, "map_lookup arraymap1"))
1008			goto close_iter;
1009		if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
1010			goto close_iter;
1011
1012		val = i + 4;
1013		err = bpf_map_lookup_elem(hash_fd, &val, &key);
1014		if (!ASSERT_OK(err, "map_lookup hashmap1"))
1015			goto close_iter;
1016		if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
1017			goto close_iter;
1018	}
1019
1020close_iter:
1021	close(iter_fd);
1022free_link:
1023	bpf_link__destroy(link);
1024out:
1025	bpf_iter_bpf_array_map__destroy(skel);
1026}
1027
1028static void test_bpf_array_map_iter_fd(void)
1029{
1030	struct bpf_iter_bpf_array_map *skel;
1031
1032	skel = bpf_iter_bpf_array_map__open_and_load();
1033	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
1034		return;
1035
1036	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
1037			    skel->maps.arraymap1);
1038
1039	bpf_iter_bpf_array_map__destroy(skel);
1040}
1041
1042static void test_bpf_percpu_array_map(void)
1043{
1044	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1045	struct bpf_iter_bpf_percpu_array_map *skel;
1046	__u32 expected_key = 0, expected_val = 0;
1047	union bpf_iter_link_info linfo;
1048	int err, i, j, map_fd, iter_fd;
1049	struct bpf_link *link;
1050	char buf[64];
1051	void *val;
1052	int len;
1053
1054	skel = bpf_iter_bpf_percpu_array_map__open();
1055	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
1056		return;
1057
1058	skel->rodata->num_cpus = bpf_num_possible_cpus();
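	/* same 8-bytes-per-possible-CPU value layout as in
	 * test_bpf_percpu_hash_map() above
	 */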
1059	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;
1060
1061	err = bpf_iter_bpf_percpu_array_map__load(skel);
1062	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
1063		goto out;
1064
1065	/* update map values here */
1066	map_fd = bpf_map__fd(skel->maps.arraymap1);
1067	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
1068		expected_key += i;
1069
1070		for (j = 0; j < bpf_num_possible_cpus(); j++) {
1071			*(__u32 *)(val + j * 8) = i + j;
1072			expected_val += i + j;
1073		}
1074
1075		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
1076		if (!ASSERT_OK(err, "map_update"))
1077			goto out;
1078	}
1079
1080	memset(&linfo, 0, sizeof(linfo));
1081	linfo.map.map_fd = map_fd;
1082	opts.link_info = &linfo;
1083	opts.link_info_len = sizeof(linfo);
1084	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
1085	if (!ASSERT_OK_PTR(link, "attach_iter"))
1086		goto out;
1087
1088	iter_fd = bpf_iter_create(bpf_link__fd(link));
1089	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
1090		goto free_link;
1091
1092	/* do some tests */
1093	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
1094		;
1095	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
1096		goto close_iter;
1097
1098	/* test results */
1099	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
1100		goto close_iter;
1101	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
1102		goto close_iter;
1103
1104close_iter:
1105	close(iter_fd);
1106free_link:
1107	bpf_link__destroy(link);
1108out:
1109	bpf_iter_bpf_percpu_array_map__destroy(skel);
1110	free(val);
1111}
1112
1113/* An iterator program deletes all local storage in a map. */
1114static void test_bpf_sk_storage_delete(void)
1115{
1116	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1117	struct bpf_iter_bpf_sk_storage_helpers *skel;
1118	union bpf_iter_link_info linfo;
1119	int err, len, map_fd, iter_fd;
1120	struct bpf_link *link;
1121	int sock_fd = -1;
1122	__u32 val = 42;
1123	char buf[64];
1124
1125	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
1126	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
1127		return;
1128
1129	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
1130
1131	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
1132	if (!ASSERT_GE(sock_fd, 0, "socket"))
1133		goto out;
1134	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
1135	if (!ASSERT_OK(err, "map_update"))
1136		goto out;
1137
1138	memset(&linfo, 0, sizeof(linfo));
1139	linfo.map.map_fd = map_fd;
1140	opts.link_info = &linfo;
1141	opts.link_info_len = sizeof(linfo);
1142	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
1143					&opts);
1144	if (!ASSERT_OK_PTR(link, "attach_iter"))
1145		goto out;
1146
1147	iter_fd = bpf_iter_create(bpf_link__fd(link));
1148	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
1149		goto free_link;
1150
1151	/* do some tests */
1152	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
1153		;
1154	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
1155		goto close_iter;
1156
1157	/* test results */
1158	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
1159	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
1160		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
1161		goto close_iter;
1162
1163close_iter:
1164	close(iter_fd);
1165free_link:
1166	bpf_link__destroy(link);
1167out:
1168	if (sock_fd >= 0)
1169		close(sock_fd);
1170	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
1171}
1172
1173/* This creates a socket and its local storage. It then runs a task_iter BPF
1174 * program that replaces the existing socket local storage with the tgid of the
1175 * only task owning a file descriptor to this socket: this process, prog_tests.
1176 * It then runs a tcp socket iterator that negates the value in the existing
1177 * socket local storage; the test verifies that the resulting value is -pid.
1178 */
1179static void test_bpf_sk_storage_get(void)
1180{
1181	struct bpf_iter_bpf_sk_storage_helpers *skel;
1182	int err, map_fd, val = -1;
1183	int sock_fd = -1;
1184
1185	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
1186	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
1187		return;
1188
1189	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
1190	if (!ASSERT_GE(sock_fd, 0, "socket"))
1191		goto out;
1192
1193	err = listen(sock_fd, 1);
1194	if (!ASSERT_OK(err, "listen"))
1195		goto close_socket;
1196
1197	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
1198
1199	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
1200	if (!ASSERT_OK(err, "bpf_map_update_elem"))
1201		goto close_socket;
1202
1203	do_dummy_read(skel->progs.fill_socket_owner);
1204
1205	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
1206	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
1207	    "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
1208	    getpid(), val, err))
1209		goto close_socket;
1210
1211	do_dummy_read(skel->progs.negate_socket_local_storage);
1212
1213	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
1214	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
1215	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
1216	      -getpid(), val, err);
1217
1218close_socket:
1219	close(sock_fd);
1220out:
1221	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
1222}
1223
1224static void test_bpf_sk_storage_map_iter_fd(void)
1225{
1226	struct bpf_iter_bpf_sk_storage_map *skel;
1227
1228	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
1229	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
1230		return;
1231
1232	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
1233			    skel->maps.sk_stg_map);
1234
1235	bpf_iter_bpf_sk_storage_map__destroy(skel);
1236}
1237
1238static void test_bpf_sk_storage_map(void)
1239{
1240	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1241	int err, i, len, map_fd, iter_fd, num_sockets;
1242	struct bpf_iter_bpf_sk_storage_map *skel;
1243	union bpf_iter_link_info linfo;
1244	int sock_fd[3] = {-1, -1, -1};
1245	__u32 val, expected_val = 0;
1246	struct bpf_link *link;
1247	char buf[64];
1248
1249	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
1250	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
1251		return;
1252
1253	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
1254	num_sockets = ARRAY_SIZE(sock_fd);
1255	for (i = 0; i < num_sockets; i++) {
1256		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
1257		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
1258			goto out;
1259
1260		val = i + 1;
1261		expected_val += val;
1262
1263		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
1264					  BPF_NOEXIST);
1265		if (!ASSERT_OK(err, "map_update"))
1266			goto out;
1267	}
1268
1269	memset(&linfo, 0, sizeof(linfo));
1270	linfo.map.map_fd = map_fd;
1271	opts.link_info = &linfo;
1272	opts.link_info_len = sizeof(linfo);
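	/* The iterator's storage value pointer is writable, but writes are
	 * bounds-checked against the map's value size (the program loads
	 * fine, so the check apparently happens when the link is created):
	 * attaching the out-of-bounds writer must fail with -EACCES.
	 */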
1273	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
1274	err = libbpf_get_error(link);
1275	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
1276		if (!err)
1277			bpf_link__destroy(link);
1278		goto out;
1279	}
1280
1281	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
1282	if (!ASSERT_OK_PTR(link, "attach_iter"))
1283		goto out;
1284
1285	iter_fd = bpf_iter_create(bpf_link__fd(link));
1286	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
1287		goto free_link;
1288
1289	skel->bss->to_add_val = time(NULL);
1290	/* do some tests */
1291	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
1292		;
1293	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
1294		goto close_iter;
1295
1296	/* test results */
1297	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
1298		goto close_iter;
1299
1300	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
1301		goto close_iter;
1302
1303	for (i = 0; i < num_sockets; i++) {
1304		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
1305		if (!ASSERT_OK(err, "map_lookup") ||
1306		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
1307			break;
1308	}
1309
1310close_iter:
1311	close(iter_fd);
1312free_link:
1313	bpf_link__destroy(link);
1314out:
1315	for (i = 0; i < num_sockets; i++) {
1316		if (sock_fd[i] >= 0)
1317			close(sock_fd[i]);
1318	}
1319	bpf_iter_bpf_sk_storage_map__destroy(skel);
1320}
1321
1322static void test_rdonly_buf_out_of_bound(void)
1323{
1324	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1325	struct bpf_iter_test_kern5 *skel;
1326	union bpf_iter_link_info linfo;
1327	struct bpf_link *link;
1328
1329	skel = bpf_iter_test_kern5__open_and_load();
1330	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
1331		return;
1332
1333	memset(&linfo, 0, sizeof(linfo));
1334	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
1335	opts.link_info = &linfo;
1336	opts.link_info_len = sizeof(linfo);
1337	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
1338	if (!ASSERT_ERR_PTR(link, "attach_iter"))
1339		bpf_link__destroy(link);
1340
1341	bpf_iter_test_kern5__destroy(skel);
1342}
1343
1344static void test_buf_neg_offset(void)
1345{
1346	struct bpf_iter_test_kern6 *skel;
1347
1348	skel = bpf_iter_test_kern6__open_and_load();
1349	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
1350		bpf_iter_test_kern6__destroy(skel);
1351}
1352
1353static void test_link_iter(void)
1354{
1355	struct bpf_iter_bpf_link *skel;
1356
1357	skel = bpf_iter_bpf_link__open_and_load();
1358	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
1359		return;
1360
1361	do_dummy_read(skel->progs.dump_bpf_link);
1362
1363	bpf_iter_bpf_link__destroy(skel);
1364}
1365
1366static void test_ksym_iter(void)
1367{
1368	struct bpf_iter_ksym *skel;
1369
1370	skel = bpf_iter_ksym__open_and_load();
1371	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
1372		return;
1373
1374	do_dummy_read(skel->progs.dump_ksym);
1375
1376	bpf_iter_ksym__destroy(skel);
1377}
1378
1379#define CMP_BUFFER_SIZE 1024
1380static char task_vma_output[CMP_BUFFER_SIZE];
1381static char proc_maps_output[CMP_BUFFER_SIZE];
1382
1383/* remove spaces and tabs from str, and only keep the first line */
1384static void str_strip_first_line(char *str)
1385{
1386	char *dst = str, *src = str;
1387
1388	do {
1389		if (*src == ' ' || *src == '\t')
1390			src++;
1391		else
1392			*(dst++) = *(src++);
1393
1394	} while (*src != '\0' && *src != '\n');
1395
1396	*dst = '\0';
1397}
1398
1399static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
1400{
1401	int err, iter_fd = -1, proc_maps_fd = -1;
1402	struct bpf_iter_task_vmas *skel;
1403	int len, read_size = 4;
1404	char maps_path[64];
1405
1406	skel = bpf_iter_task_vmas__open();
1407	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
1408		return;
1409
1410	skel->bss->pid = getpid();
1411	skel->bss->one_task = opts ? 1 : 0;
1412
1413	err = bpf_iter_task_vmas__load(skel);
1414	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
1415		goto out;
1416
1417	skel->links.proc_maps = bpf_program__attach_iter(
1418		skel->progs.proc_maps, opts);
1419
1420	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
1421		skel->links.proc_maps = NULL;
1422		goto out;
1423	}
1424
1425	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
1426	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
1427		goto out;
1428
1429	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
1430	 * to trigger seq_file corner cases.
1431	 */
1432	len = 0;
1433	while (len < CMP_BUFFER_SIZE) {
1434		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
1435					  MIN(read_size, CMP_BUFFER_SIZE - len));
1436		if (!err)
1437			break;
1438		if (!ASSERT_GE(err, 0, "read_iter_fd"))
1439			goto out;
1440		len += err;
1441	}
1442	if (opts)
1443		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");
1444
1445	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
1446	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
1447	proc_maps_fd = open(maps_path, O_RDONLY);
1448	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
1449		goto out;
1450	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
1451	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
1452		goto out;
1453
1454	/* strip and compare the first line of the two files */
1455	str_strip_first_line(task_vma_output);
1456	str_strip_first_line(proc_maps_output);
1457
1458	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
1459
1460	check_bpf_link_info(skel->progs.proc_maps);
1461
1462out:
1463	close(proc_maps_fd);
1464	close(iter_fd);
1465	bpf_iter_task_vmas__destroy(skel);
1466}
1467
1468static void test_task_vma_dead_task(void)
1469{
1470	struct bpf_iter_task_vmas *skel;
1471	int wstatus, child_pid = -1;
1472	time_t start_tm, cur_tm;
1473	int err, iter_fd = -1;
1474	int wait_sec = 3;
1475
1476	skel = bpf_iter_task_vmas__open();
1477	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
1478		return;
1479
1480	skel->bss->pid = getpid();
1481
1482	err = bpf_iter_task_vmas__load(skel);
1483	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
1484		goto out;
1485
1486	skel->links.proc_maps = bpf_program__attach_iter(
1487		skel->progs.proc_maps, NULL);
1488
1489	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
1490		skel->links.proc_maps = NULL;
1491		goto out;
1492	}
1493
1494	start_tm = time(NULL);
1495	cur_tm = start_tm;
1496
1497	child_pid = fork();
1498	if (child_pid == 0) {
1499		/* Fork short-lived processes in the background. */
1500		while (cur_tm < start_tm + wait_sec) {
1501			system("echo > /dev/null");
1502			cur_tm = time(NULL);
1503		}
1504		exit(0);
1505	}
1506
1507	if (!ASSERT_GE(child_pid, 0, "fork_child"))
1508		goto out;
1509
1510	while (cur_tm < start_tm + wait_sec) {
1511		iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
1512		if (!ASSERT_GE(iter_fd, 0, "create_iter"))
1513			goto out;
1514
1515		/* Drain all data from iter_fd. */
1516		while (cur_tm < start_tm + wait_sec) {
1517			err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
1518			if (!ASSERT_GE(err, 0, "read_iter_fd"))
1519				goto out;
1520
1521			cur_tm = time(NULL);
1522
1523			if (err == 0)
1524				break;
1525		}
1526
1527		close(iter_fd);
1528		iter_fd = -1;
1529	}
1530
1531	check_bpf_link_info(skel->progs.proc_maps);
1532
1533out:
1534	waitpid(child_pid, &wstatus, 0);
1535	close(iter_fd);
1536	bpf_iter_task_vmas__destroy(skel);
1537}
1538
1539static void test_bpf_sockmap_map_iter_fd(void)
1540{
1541	struct bpf_iter_sockmap *skel;
1542
1543	skel = bpf_iter_sockmap__open_and_load();
1544	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
1545		return;
1546
1547	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);
1548
1549	bpf_iter_sockmap__destroy(skel);
1550}
1551
1552static void test_task_vma(void)
1553{
1554	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1555	union bpf_iter_link_info linfo;
1556
1557	memset(&linfo, 0, sizeof(linfo));
1558	linfo.task.tid = getpid();
1559	opts.link_info = &linfo;
1560	opts.link_info_len = sizeof(linfo);
1561
1562	test_task_vma_common(&opts);
1563	test_task_vma_common(NULL);
1564}
1565
1566/* uprobe attach point */
1567static noinline int trigger_func(int arg)
1568{
1569	asm volatile ("");
1570	return arg + 1;
1571}
1572
1573static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
1574{
1575	struct bpf_iter_vma_offset *skel;
1576	char buf[16] = {};
1577	int iter_fd, len;
1578	int pgsz, shift;
1579
1580	skel = bpf_iter_vma_offset__open_and_load();
1581	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
1582		return;
1583
1584	skel->bss->pid = getpid();
1585	skel->bss->address = (uintptr_t)trigger_func;
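	/* derive page_shift = log2(page size) by counting right shifts */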
1586	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
1587		;
1588	skel->bss->page_shift = shift;
1589
1590	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
1591	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
1592		goto exit;
1593
1594	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
1595	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
1596		goto exit;
1597
1598	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
1599		;
1600	buf[15] = 0;
1601	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");
1602
1603	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
1604	if (one_proc)
1605		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
1606	else
1607		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
1608
1609	close(iter_fd);
1610
1611exit:
1612	bpf_iter_vma_offset__destroy(skel);
1613}
1614
1615static void test_task_vma_offset(void)
1616{
1617	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1618	union bpf_iter_link_info linfo;
1619
1620	memset(&linfo, 0, sizeof(linfo));
1621	linfo.task.pid = getpid();
1622	opts.link_info = &linfo;
1623	opts.link_info_len = sizeof(linfo);
1624
1625	test_task_vma_offset_common(&opts, true);
1626
1627	linfo.task.pid = 0;
1628	linfo.task.tid = getpid();
1629	test_task_vma_offset_common(&opts, true);
1630
1631	test_task_vma_offset_common(NULL, false);
1632}
1633
1634void test_bpf_iter(void)
1635{
1636	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");
1637
1638	if (test__start_subtest("btf_id_or_null"))
1639		test_btf_id_or_null();
1640	if (test__start_subtest("ipv6_route"))
1641		test_ipv6_route();
1642	if (test__start_subtest("netlink"))
1643		test_netlink();
1644	if (test__start_subtest("bpf_map"))
1645		test_bpf_map();
1646	if (test__start_subtest("task_tid"))
1647		test_task_tid();
1648	if (test__start_subtest("task_pid"))
1649		test_task_pid();
1650	if (test__start_subtest("task_pidfd"))
1651		test_task_pidfd();
1652	if (test__start_subtest("task_sleepable"))
1653		test_task_sleepable();
1654	if (test__start_subtest("task_stack"))
1655		test_task_stack();
1656	if (test__start_subtest("task_file"))
1657		test_task_file();
1658	if (test__start_subtest("task_vma"))
1659		test_task_vma();
1660	if (test__start_subtest("task_vma_dead_task"))
1661		test_task_vma_dead_task();
1662	if (test__start_subtest("task_btf"))
1663		test_task_btf();
1664	if (test__start_subtest("tcp4"))
1665		test_tcp4();
1666	if (test__start_subtest("tcp6"))
1667		test_tcp6();
1668	if (test__start_subtest("udp4"))
1669		test_udp4();
1670	if (test__start_subtest("udp6"))
1671		test_udp6();
1672	if (test__start_subtest("unix"))
1673		test_unix();
1674	if (test__start_subtest("anon"))
1675		test_anon_iter(false);
1676	if (test__start_subtest("anon-read-one-char"))
1677		test_anon_iter(true);
1678	if (test__start_subtest("file"))
1679		test_file_iter();
1680	if (test__start_subtest("overflow"))
1681		test_overflow(false, false);
1682	if (test__start_subtest("overflow-e2big"))
1683		test_overflow(true, false);
1684	if (test__start_subtest("prog-ret-1"))
1685		test_overflow(false, true);
1686	if (test__start_subtest("bpf_hash_map"))
1687		test_bpf_hash_map();
1688	if (test__start_subtest("bpf_percpu_hash_map"))
1689		test_bpf_percpu_hash_map();
1690	if (test__start_subtest("bpf_array_map"))
1691		test_bpf_array_map();
1692	if (test__start_subtest("bpf_array_map_iter_fd"))
1693		test_bpf_array_map_iter_fd();
1694	if (test__start_subtest("bpf_percpu_array_map"))
1695		test_bpf_percpu_array_map();
1696	if (test__start_subtest("bpf_sk_storage_map"))
1697		test_bpf_sk_storage_map();
1698	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
1699		test_bpf_sk_storage_map_iter_fd();
1700	if (test__start_subtest("bpf_sk_storage_delete"))
1701		test_bpf_sk_storage_delete();
1702	if (test__start_subtest("bpf_sk_storage_get"))
1703		test_bpf_sk_storage_get();
1704	if (test__start_subtest("rdonly-buf-out-of-bound"))
1705		test_rdonly_buf_out_of_bound();
1706	if (test__start_subtest("buf-neg-offset"))
1707		test_buf_neg_offset();
1708	if (test__start_subtest("link-iter"))
1709		test_link_iter();
1710	if (test__start_subtest("ksym"))
1711		test_ksym_iter();
1712	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
1713		test_bpf_sockmap_map_iter_fd();
1714	if (test__start_subtest("vma_offset"))
1715		test_task_vma_offset();
1716}