/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>

#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <fcntl.h>

#include <linux/bpf.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"

static int error_cnt, pass_cnt;

#define MAGIC_BYTES 123

/* ipv4 test vector */
static struct {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_v4 = {
	.eth.h_proto = bpf_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = 6,
	.iph.tot_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

/* ipv6 test vector */
static struct {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_v6 = {
	.eth.h_proto = bpf_htons(ETH_P_IPV6),
	.iph.nexthdr = 6,
	.iph.payload_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

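/* Test helper: bump pass_cnt or error_cnt and print a tagged result
 * line. Note that the macro expects a __u32 "duration" variable to be
 * in scope at every call site; bpf_prog_test_run() fills it in with
 * the run time in nanoseconds. Typical use:
 *
 *	CHECK(err || retval, "ipv4", "err %d retval %d\n", err, retval);
 */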
#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})

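/* Look up a map by name in a loaded object and return its fd, or -1
 * (counting a failure) if the object has no map of that name.
 */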
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		error_cnt++;
		return -1;
	}
	return bpf_map__fd(map);
}

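/* Run test_pkt_access.o as a TC classifier over the canned IPv4 and
 * IPv6 packets; the program is expected to return 0 for both.
 */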
static void test_pkt_access(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
}

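/* XDP-based IP tunnel test: seed the vip2tnl map, run the program on
 * both canned packets, and check that each comes back as XDP_TX,
 * grown to the encapsulated size, with the expected outer protocol.
 */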
static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

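/* L4 load-balancer test: seed the vip_map, ch_rings and reals maps,
 * run NUM_ITER iterations per address family, then verify the
 * redirect verdict, the rewritten packet, and the per-cpu stats.
 */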
static void test_l4lb(const char *file)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

static void test_l4lb_all(void)
{
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";

	test_l4lb(file1);
	test_l4lb(file2);
}

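/* Same load-balancer scenario as test_l4lb(), but built from the
 * non-inlined object and run as an XDP program, so a different
 * return value is expected.
 */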
static void test_xdp_noinline(void)
{
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

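/* Load-only test: test_tcp_estats.o merely has to pass the verifier
 * as a tracepoint program; nothing is attached or run.
 */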
static void test_tcp_estats(void)
{
	const char *file = "./test_tcp_estats.o";
	int err, prog_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	CHECK(err, "", "err %d errno %d\n", err, errno);
	if (err)
		return;

	bpf_object__close(obj);
}

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

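/* Exercise the object-id API: load the same object twice, validate
 * bpf_obj_get_info_by_fd() for each prog and map, then walk all ids
 * via bpf_prog_get_next_id()/bpf_map_get_next_id(), including a few
 * error paths (nonexistent ids, bad map_ids pointer).
 */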
static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int sysctl_fd, jit_enabled = 0, err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			jit_enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)
		objs[i] = NULL;

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		now = time(NULL);
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */
		if (err)
			error_cnt++;
		assert(!err);

		/* Insert a magic value to the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		assert(!err);

		/* Check getting map info */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
			goto done;

		/* Check getting prog info */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		assert(!err);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		assert(!err);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
			goto done;
	}

	/* Check bpf_prog_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		__u32 saved_map_id;
		int prog_fd;

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		/* Negative test:
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)\n",
			  err, errno, EFAULT))
			break;
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);
		close(prog_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	/* Check bpf_map_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		int map_fd;

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		assert(!err);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);

		close(map_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

done:
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}

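/* Run test_pkt_md_access.o as a TC classifier to verify that reads of
 * skb metadata fields behave as the program expects (retval 0).
 */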
static void test_pkt_md_access(void)
{
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
}

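/* Feed valid and invalid object names straight to the bpf(2) syscall
 * for BPF_PROG_LOAD and BPF_MAP_CREATE, and check that the kernel
 * accepts or rejects each with the expected errno.
 */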
static void test_obj_name(void)
{
	struct {
		const char *name;
		int success;
		int expected_errno;
	} tests[] = {
		{ "", 1, 0 },
		{ "_123456789ABCDE", 1, 0 },
		{ "_123456789ABCDEF", 0, EINVAL },
		{ "_123456789ABCD\n", 0, EINVAL },
	};
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	__u32 duration = 0;
	int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;
		union bpf_attr attr;
		size_t ncopy;
		int fd;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insn_cnt = 2;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		       fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 4;
		attr.max_entries = 1;
		attr.map_flags = 0;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
	}
}

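/* Attach three tracepoint programs to sched:sched_switch via
 * perf_event_open() and exercise PERF_EVENT_IOC_QUERY_BPF at every
 * step: empty array, count-only query, short buffer (ENOSPC), bad
 * pointer (EFAULT), and the returned program ids themselves.
 */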
static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* check NULL prog array query */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* try to get # of programs only */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* try a few negative tests */
			/* invalid query pointer */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* not enough space */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	i = num_progs - 1;
	for (; i >= 0; i--) {
 cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
 cleanup2:
		close(pmu_fd[i]);
 cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}

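/* Check that every key of map1 is also present in map2; callers run
 * this in both directions to compare the two key sets. Returns 0 on
 * success, nonzero on a missing key or iteration error.
 */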
static int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

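/* Attach test_stacktrace_map.o to sched:sched_switch through a perf
 * event, let it collect stack ids for a second, then verify that
 * stackid_hmap and stackmap ended up with identical key sets.
 */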
static void test_stacktrace_map()
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	/* Get the ID for the sched/sched_switch tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (bytes <= 0 || bytes >= sizeof(buf))
		goto close_prog;

	/* Open the perf event and attach the bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (err)
		goto disable_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (err)
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto disable_pmu;

	/* give the bpf program some time to run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	goto disable_pmu_noerr;
disable_pmu:
	error_cnt++;
disable_pmu_noerr:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
}

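/* Same key-set comparison as test_stacktrace_map(), but the program
 * is attached with bpf_raw_tracepoint_open() instead of a perf event.
 */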
static void test_stacktrace_map_raw_tp()
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int efd, err, prog_fd;
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto close_prog;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto close_prog;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto close_prog;

	/* give the bpf program some time to run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

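/* Shell out to readelf for the build ID line of ./urandom_read; the
 * caller substring-matches the hex id against stack map entries.
 */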
static int extract_build_id(char *build_id, size_t size)
{
	FILE *fp;
	char *line = NULL;
	size_t len = 0;

	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
	if (fp == NULL)
		return -1;

	if (getline(&line, &len, fp) == -1)
		goto err;
	pclose(fp);

	if (len > size - 1)
		len = size - 1;
	memcpy(build_id, line, len);
	build_id[len] = '\0';
	return 0;
err:
	pclose(fp);
	return -1;
}

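/* Attach test_stacktrace_build_id.o to the random:urandom_read
 * tracepoint, trigger it by running ./urandom_read, compare the two
 * stack maps' key sets, and check that at least one collected frame
 * carries the build id that readelf reports for the binary.
 */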
static void test_stacktrace_build_id(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		goto out;

	/* Get the ID for the random/urandom_read tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	/* Open the perf event and attach the bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("./urandom_read") == 0);
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	CHECK(build_id_matches < 1, "build id match",
	      "Didn't find expected build ID from the map\n");

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);

out:
	return;
}

int main(void)
{
	test_pkt_access();
	test_xdp();
	test_l4lb_all();
	test_xdp_noinline();
	test_tcp_estats();
	test_bpf_obj_id();
	test_pkt_md_access();
	test_obj_name();
	test_tp_attach_query();
	test_stacktrace_map();
	test_stacktrace_build_id();
	test_stacktrace_map_raw_tp();

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}