v5.14.15
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3
   4#define _GNU_SOURCE
   5#include <errno.h>
   6#include <fcntl.h>
   7#include <signal.h>
   8#include <stdarg.h>
   9#include <stdio.h>
  10#include <stdlib.h>
  11#include <string.h>
  12#include <time.h>
  13#include <unistd.h>
  14#include <net/if.h>
  15#include <sys/ioctl.h>
  16#include <sys/types.h>
  17#include <sys/stat.h>
  18#include <sys/syscall.h>
  19#include <dirent.h>
  20
  21#include <linux/err.h>
  22#include <linux/perf_event.h>
  23#include <linux/sizes.h>
  24
  25#include <bpf/bpf.h>
  26#include <bpf/btf.h>
  27#include <bpf/libbpf.h>
  28#include <bpf/bpf_gen_internal.h>
  29#include <bpf/skel_internal.h>
  30
  31#include "cfg.h"
  32#include "main.h"
  33#include "xlated_dumper.h"
  34
  35#define BPF_METADATA_PREFIX "bpf_metadata_"
  36#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
  37
  38const char * const prog_type_name[] = {
  39	[BPF_PROG_TYPE_UNSPEC]			= "unspec",
  40	[BPF_PROG_TYPE_SOCKET_FILTER]		= "socket_filter",
  41	[BPF_PROG_TYPE_KPROBE]			= "kprobe",
  42	[BPF_PROG_TYPE_SCHED_CLS]		= "sched_cls",
  43	[BPF_PROG_TYPE_SCHED_ACT]		= "sched_act",
  44	[BPF_PROG_TYPE_TRACEPOINT]		= "tracepoint",
  45	[BPF_PROG_TYPE_XDP]			= "xdp",
  46	[BPF_PROG_TYPE_PERF_EVENT]		= "perf_event",
  47	[BPF_PROG_TYPE_CGROUP_SKB]		= "cgroup_skb",
  48	[BPF_PROG_TYPE_CGROUP_SOCK]		= "cgroup_sock",
  49	[BPF_PROG_TYPE_LWT_IN]			= "lwt_in",
  50	[BPF_PROG_TYPE_LWT_OUT]			= "lwt_out",
  51	[BPF_PROG_TYPE_LWT_XMIT]		= "lwt_xmit",
  52	[BPF_PROG_TYPE_SOCK_OPS]		= "sock_ops",
  53	[BPF_PROG_TYPE_SK_SKB]			= "sk_skb",
  54	[BPF_PROG_TYPE_CGROUP_DEVICE]		= "cgroup_device",
  55	[BPF_PROG_TYPE_SK_MSG]			= "sk_msg",
  56	[BPF_PROG_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
  57	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR]	= "cgroup_sock_addr",
  58	[BPF_PROG_TYPE_LWT_SEG6LOCAL]		= "lwt_seg6local",
  59	[BPF_PROG_TYPE_LIRC_MODE2]		= "lirc_mode2",
  60	[BPF_PROG_TYPE_SK_REUSEPORT]		= "sk_reuseport",
  61	[BPF_PROG_TYPE_FLOW_DISSECTOR]		= "flow_dissector",
  62	[BPF_PROG_TYPE_CGROUP_SYSCTL]		= "cgroup_sysctl",
  63	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE]	= "raw_tracepoint_writable",
  64	[BPF_PROG_TYPE_CGROUP_SOCKOPT]		= "cgroup_sockopt",
  65	[BPF_PROG_TYPE_TRACING]			= "tracing",
  66	[BPF_PROG_TYPE_STRUCT_OPS]		= "struct_ops",
  67	[BPF_PROG_TYPE_EXT]			= "ext",
  68	[BPF_PROG_TYPE_LSM]			= "lsm",
  69	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
  70};
  71
  72const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
  73
  74enum dump_mode {
  75	DUMP_JITED,
  76	DUMP_XLATED,
  77};
  78
  79static const char * const attach_type_strings[] = {
  80	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
  81	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
  82	[BPF_SK_SKB_VERDICT] = "skb_verdict",
  83	[BPF_SK_MSG_VERDICT] = "msg_verdict",
  84	[BPF_FLOW_DISSECTOR] = "flow_dissector",
  85	[__MAX_BPF_ATTACH_TYPE] = NULL,
  86};
  87
  88static enum bpf_attach_type parse_attach_type(const char *str)
  89{
  90	enum bpf_attach_type type;
  91
  92	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
  93		if (attach_type_strings[type] &&
  94		    is_prefix(str, attach_type_strings[type]))
  95			return type;
  96	}
  97
  98	return __MAX_BPF_ATTACH_TYPE;
  99}
 100
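    /* Convert a program load time, expressed in nanoseconds since boot, into a
     * wall-clock timestamp. Falls back to the raw load time in seconds if the
     * clocks cannot be read or converted; JSON output uses seconds since the
     * Epoch instead of a formatted date.
     */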
 101static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
 102{
 103	struct timespec real_time_ts, boot_time_ts;
 104	time_t wallclock_secs;
 105	struct tm load_tm;
 106
 107	buf[--size] = '\0';
 108
 109	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
 110	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
 111		perror("Can't read clocks");
 112		snprintf(buf, size, "%llu", nsecs / 1000000000);
 113		return;
 114	}
 115
 116	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
 117		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
 118		1000000000;
 119
 120
 121	if (!localtime_r(&wallclock_secs, &load_tm)) {
 122		snprintf(buf, size, "%llu", nsecs / 1000000000);
 123		return;
 124	}
 125
 126	if (json_output)
 127		strftime(buf, size, "%s", &load_tm);
 128	else
 129		strftime(buf, size, "%FT%T%z", &load_tm);
 130}
 131
 132static void show_prog_maps(int fd, __u32 num_maps)
 133{
 134	struct bpf_prog_info info = {};
 135	__u32 len = sizeof(info);
 136	__u32 map_ids[num_maps];
 137	unsigned int i;
 138	int err;
 139
 140	info.nr_map_ids = num_maps;
 141	info.map_ids = ptr_to_u64(map_ids);
 142
 143	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 144	if (err || !info.nr_map_ids)
 145		return;
 146
 147	if (json_output) {
 148		jsonw_name(json_wtr, "map_ids");
 149		jsonw_start_array(json_wtr);
 150		for (i = 0; i < info.nr_map_ids; i++)
 151			jsonw_uint(json_wtr, map_ids[i]);
 152		jsonw_end_array(json_wtr);
 153	} else {
 154		printf("  map_ids ");
 155		for (i = 0; i < info.nr_map_ids; i++)
 156			printf("%u%s", map_ids[i],
 157			       i == info.nr_map_ids - 1 ? "" : ",");
 158	}
 159}
 160
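    /* Scan the maps used by the program for a single-entry ".rodata" array map
     * with BTF type information, and return a malloc()-ed copy of its only
     * value. The matching map's description is returned through map_info.
     */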
 161static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
 162{
 163	struct bpf_prog_info prog_info;
 164	__u32 prog_info_len;
 165	__u32 map_info_len;
 166	void *value = NULL;
 167	__u32 *map_ids;
 168	int nr_maps;
 169	int key = 0;
 170	int map_fd;
 171	int ret;
 172	__u32 i;
 173
 174	memset(&prog_info, 0, sizeof(prog_info));
 175	prog_info_len = sizeof(prog_info);
 176	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 177	if (ret)
 178		return NULL;
 179
 180	if (!prog_info.nr_map_ids)
 181		return NULL;
 182
 183	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
 184	if (!map_ids)
 185		return NULL;
 186
 187	nr_maps = prog_info.nr_map_ids;
 188	memset(&prog_info, 0, sizeof(prog_info));
 189	prog_info.nr_map_ids = nr_maps;
 190	prog_info.map_ids = ptr_to_u64(map_ids);
 191	prog_info_len = sizeof(prog_info);
 192
 193	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 194	if (ret)
 195		goto free_map_ids;
 196
 197	for (i = 0; i < prog_info.nr_map_ids; i++) {
 198		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
 199		if (map_fd < 0)
 200			goto free_map_ids;
 201
 202		memset(map_info, 0, sizeof(*map_info));
 203		map_info_len = sizeof(*map_info);
 204		ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
 205		if (ret < 0) {
 206			close(map_fd);
 207			goto free_map_ids;
 208		}
 209
 210		if (map_info->type != BPF_MAP_TYPE_ARRAY ||
 211		    map_info->key_size != sizeof(int) ||
 212		    map_info->max_entries != 1 ||
 213		    !map_info->btf_value_type_id ||
 214		    !strstr(map_info->name, ".rodata")) {
 215			close(map_fd);
 216			continue;
 217		}
 218
 219		value = malloc(map_info->value_size);
 220		if (!value) {
 221			close(map_fd);
 222			goto free_map_ids;
 223		}
 224
 225		if (bpf_map_lookup_elem(map_fd, &key, value)) {
 226			close(map_fd);
 227			free(value);
 228			value = NULL;
 229			goto free_map_ids;
 230		}
 231
 232		close(map_fd);
 233		break;
 234	}
 235
 236free_map_ids:
 237	free(map_ids);
 238	return value;
 239}
 240
 241static bool has_metadata_prefix(const char *s)
 242{
 243	return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
 244}
 245
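    /* Print the "bpf_metadata_"-prefixed variables found in the program's
     * .rodata map, using BTF to format each value for JSON or plain output.
     */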
 246static void show_prog_metadata(int fd, __u32 num_maps)
 247{
 248	const struct btf_type *t_datasec, *t_var;
 249	struct bpf_map_info map_info;
 250	struct btf_var_secinfo *vsi;
 251	bool printed_header = false;
 252	struct btf *btf = NULL;
 253	unsigned int i, vlen;
 254	void *value = NULL;
 255	const char *name;
 256	int err;
 257
 258	if (!num_maps)
 259		return;
 260
 261	memset(&map_info, 0, sizeof(map_info));
 262	value = find_metadata(fd, &map_info);
 263	if (!value)
 264		return;
 265
 266	err = btf__get_from_id(map_info.btf_id, &btf);
 267	if (err || !btf)
 268		goto out_free;
 269
 270	t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
 271	if (!btf_is_datasec(t_datasec))
 272		goto out_free;
 273
 274	vlen = btf_vlen(t_datasec);
 275	vsi = btf_var_secinfos(t_datasec);
 276
 277	/* We don't proceed to check the kinds of the elements of the DATASEC.
 278	 * The verifier enforces them to be BTF_KIND_VAR.
 279	 */
 280
 281	if (json_output) {
 282		struct btf_dumper d = {
 283			.btf = btf,
 284			.jw = json_wtr,
 285			.is_plain_text = false,
 286		};
 287
 288		for (i = 0; i < vlen; i++, vsi++) {
 289			t_var = btf__type_by_id(btf, vsi->type);
 290			name = btf__name_by_offset(btf, t_var->name_off);
 291
 292			if (!has_metadata_prefix(name))
 293				continue;
 294
 295			if (!printed_header) {
 296				jsonw_name(json_wtr, "metadata");
 297				jsonw_start_object(json_wtr);
 298				printed_header = true;
 299			}
 300
 301			jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
 302			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 303			if (err) {
 304				p_err("btf dump failed: %d", err);
 305				break;
 306			}
 307		}
 308		if (printed_header)
 309			jsonw_end_object(json_wtr);
 310	} else {
 311		json_writer_t *btf_wtr = jsonw_new(stdout);
 312		struct btf_dumper d = {
 313			.btf = btf,
 314			.jw = btf_wtr,
 315			.is_plain_text = true,
 316		};
 317
 318		if (!btf_wtr) {
 319			p_err("jsonw alloc failed");
 320			goto out_free;
 321		}
 322
 323		for (i = 0; i < vlen; i++, vsi++) {
 324			t_var = btf__type_by_id(btf, vsi->type);
 325			name = btf__name_by_offset(btf, t_var->name_off);
 326
 327			if (!has_metadata_prefix(name))
 328				continue;
 329
 330			if (!printed_header) {
 331				printf("\tmetadata:");
 332				printed_header = true;
 333			}
 334
 335			printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
 336
 337			jsonw_reset(btf_wtr);
 338			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 339			if (err) {
 340				p_err("btf dump failed: %d", err);
 341				break;
 342			}
 343		}
 344		if (printed_header)
 345			jsonw_destroy(&btf_wtr);
 346	}
 347
 348out_free:
 349	btf__free(btf);
 350	free(value);
 351}
 352
 353static void print_prog_header_json(struct bpf_prog_info *info)
 354{
 355	jsonw_uint_field(json_wtr, "id", info->id);
 356	if (info->type < ARRAY_SIZE(prog_type_name))
 357		jsonw_string_field(json_wtr, "type",
 358				   prog_type_name[info->type]);
 359	else
 360		jsonw_uint_field(json_wtr, "type", info->type);
 361
 362	if (*info->name)
 363		jsonw_string_field(json_wtr, "name", info->name);
 364
 365	jsonw_name(json_wtr, "tag");
 366	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
 367		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
 368		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 369
 370	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
 371	if (info->run_time_ns) {
 372		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
 373		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
 374	}
 375	if (info->recursion_misses)
 376		jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
 377}
 378
 379static void print_prog_json(struct bpf_prog_info *info, int fd)
 380{
 381	char *memlock;
 382
 383	jsonw_start_object(json_wtr);
 384	print_prog_header_json(info);
 385	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
 386
 387	if (info->load_time) {
 388		char buf[32];
 389
 390		print_boot_time(info->load_time, buf, sizeof(buf));
 391
 392		/* Piggy back on load_time, since 0 uid is a valid one */
 393		jsonw_name(json_wtr, "loaded_at");
 394		jsonw_printf(json_wtr, "%s", buf);
 395		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
 396	}
 397
 398	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
 399
 400	if (info->jited_prog_len) {
 401		jsonw_bool_field(json_wtr, "jited", true);
 402		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
 403	} else {
 404		jsonw_bool_field(json_wtr, "jited", false);
 405	}
 406
 407	memlock = get_fdinfo(fd, "memlock");
 408	if (memlock)
 409		jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
 410	free(memlock);
 411
 412	if (info->nr_map_ids)
 413		show_prog_maps(fd, info->nr_map_ids);
 414
 415	if (info->btf_id)
 416		jsonw_int_field(json_wtr, "btf_id", info->btf_id);
 417
 418	if (!hash_empty(prog_table.table)) {
 419		struct pinned_obj *obj;
 420
 421		jsonw_name(json_wtr, "pinned");
 422		jsonw_start_array(json_wtr);
 423		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
 424			if (obj->id == info->id)
 425				jsonw_string(json_wtr, obj->path);
 426		}
 427		jsonw_end_array(json_wtr);
 428	}
 429
 430	emit_obj_refs_json(&refs_table, info->id, json_wtr);
 431
 432	show_prog_metadata(fd, info->nr_map_ids);
 433
 434	jsonw_end_object(json_wtr);
 435}
 436
 437static void print_prog_header_plain(struct bpf_prog_info *info)
 438{
 439	printf("%u: ", info->id);
 440	if (info->type < ARRAY_SIZE(prog_type_name))
 441		printf("%s  ", prog_type_name[info->type]);
 442	else
 443		printf("type %u  ", info->type);
 444
 445	if (*info->name)
 446		printf("name %s  ", info->name);
 447
 448	printf("tag ");
 449	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
 450	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
 451	printf("%s", info->gpl_compatible ? "  gpl" : "");
 452	if (info->run_time_ns)
 453		printf(" run_time_ns %lld run_cnt %lld",
 454		       info->run_time_ns, info->run_cnt);
 455	if (info->recursion_misses)
 456		printf(" recursion_misses %lld", info->recursion_misses);
 457	printf("\n");
 458}
 459
 460static void print_prog_plain(struct bpf_prog_info *info, int fd)
 461{
 462	char *memlock;
 463
 464	print_prog_header_plain(info);
 465
 466	if (info->load_time) {
 467		char buf[32];
 468
 469		print_boot_time(info->load_time, buf, sizeof(buf));
 470
 471		/* Piggy back on load_time, since 0 uid is a valid one */
 472		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
 473	}
 474
 475	printf("\txlated %uB", info->xlated_prog_len);
 476
 477	if (info->jited_prog_len)
 478		printf("  jited %uB", info->jited_prog_len);
 479	else
 480		printf("  not jited");
 481
 482	memlock = get_fdinfo(fd, "memlock");
 483	if (memlock)
 484		printf("  memlock %sB", memlock);
 485	free(memlock);
 486
 487	if (info->nr_map_ids)
 488		show_prog_maps(fd, info->nr_map_ids);
 489
 490	if (!hash_empty(prog_table.table)) {
 491		struct pinned_obj *obj;
 492
 493		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
 494			if (obj->id == info->id)
 495				printf("\n\tpinned %s", obj->path);
 496		}
 497	}
 498
 499	if (info->btf_id)
 500		printf("\n\tbtf_id %d", info->btf_id);
 501
 502	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
 503
 504	printf("\n");
 505
 506	show_prog_metadata(fd, info->nr_map_ids);
 507}
 508
 509static int show_prog(int fd)
 510{
 511	struct bpf_prog_info info = {};
 512	__u32 len = sizeof(info);
 513	int err;
 514
 515	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 516	if (err) {
 517		p_err("can't get prog info: %s", strerror(errno));
 518		return -1;
 519	}
 520
 521	if (json_output)
 522		print_prog_json(&info, fd);
 523	else
 524		print_prog_plain(&info, fd);
 525
 526	return 0;
 527}
 528
 529static int do_show_subset(int argc, char **argv)
 530{
 531	int *fds = NULL;
 532	int nb_fds, i;
 533	int err = -1;
 534
 535	fds = malloc(sizeof(int));
 536	if (!fds) {
 537		p_err("mem alloc failed");
 538		return -1;
 539	}
 540	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 541	if (nb_fds < 1)
 542		goto exit_free;
 543
 544	if (json_output && nb_fds > 1)
 545		jsonw_start_array(json_wtr);	/* root array */
 546	for (i = 0; i < nb_fds; i++) {
 547		err = show_prog(fds[i]);
 548		if (err) {
 549			for (; i < nb_fds; i++)
 550				close(fds[i]);
 551			break;
 552		}
 553		close(fds[i]);
 554	}
 555	if (json_output && nb_fds > 1)
 556		jsonw_end_array(json_wtr);	/* root array */
 557
 558exit_free:
 559	free(fds);
 560	return err;
 561}
 562
 563static int do_show(int argc, char **argv)
 564{
 565	__u32 id = 0;
 566	int err;
 567	int fd;
 568
 569	if (show_pinned)
 570		build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
 571	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
 572
 573	if (argc == 2)
 574		return do_show_subset(argc, argv);
 575
 576	if (argc)
 577		return BAD_ARG();
 578
 579	if (json_output)
 580		jsonw_start_array(json_wtr);
 581	while (true) {
 582		err = bpf_prog_get_next_id(id, &id);
 583		if (err) {
 584			if (errno == ENOENT) {
 585				err = 0;
 586				break;
 587			}
 588			p_err("can't get next program: %s%s", strerror(errno),
 589			      errno == EINVAL ? " -- kernel too old?" : "");
 590			err = -1;
 591			break;
 592		}
 593
 594		fd = bpf_prog_get_fd_by_id(id);
 595		if (fd < 0) {
 596			if (errno == ENOENT)
 597				continue;
 598			p_err("can't get prog by id (%u): %s",
 599			      id, strerror(errno));
 600			err = -1;
 601			break;
 602		}
 603
 604		err = show_prog(fd);
 605		close(fd);
 606		if (err)
 607			break;
 608	}
 609
 610	if (json_output)
 611		jsonw_end_array(json_wtr);
 612
 613	delete_obj_refs_table(&refs_table);
 614
 615	return err;
 616}
 617
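    /* Core of "prog dump": either write the raw instructions to a file, or
     * disassemble the JIT-ed image, or pretty-print/visualize the translated
     * instructions, depending on the dump mode and options.
     */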
 618static int
 619prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 620	  char *filepath, bool opcodes, bool visual, bool linum)
 621{
 622	struct bpf_prog_linfo *prog_linfo = NULL;
 623	const char *disasm_opt = NULL;
 624	struct dump_data dd = {};
 625	void *func_info = NULL;
 626	struct btf *btf = NULL;
 627	char func_sig[1024];
 628	unsigned char *buf;
 629	__u32 member_len;
 630	ssize_t n;
 631	int fd;
 632
 633	if (mode == DUMP_JITED) {
 634		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
 635			p_info("no instructions returned");
 636			return -1;
 637		}
 638		buf = u64_to_ptr(info->jited_prog_insns);
 639		member_len = info->jited_prog_len;
 640	} else {	/* DUMP_XLATED */
 641		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 642			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 643			return -1;
 644		}
 645		buf = u64_to_ptr(info->xlated_prog_insns);
 646		member_len = info->xlated_prog_len;
 647	}
 648
 649	if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
 650		p_err("failed to get btf");
 651		return -1;
 652	}
 653
 654	func_info = u64_to_ptr(info->func_info);
 655
 656	if (info->nr_line_info) {
 657		prog_linfo = bpf_prog_linfo__new(info);
 658		if (!prog_linfo)
 659			p_info("error in processing bpf_line_info.  continue without it.");
 660	}
 661
 662	if (filepath) {
 663		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 664		if (fd < 0) {
 665			p_err("can't open file %s: %s", filepath,
 666			      strerror(errno));
 667			return -1;
 668		}
 669
 670		n = write(fd, buf, member_len);
 671		close(fd);
 672		if (n != (ssize_t)member_len) {
 673			p_err("error writing output file: %s",
 674			      n < 0 ? strerror(errno) : "short write");
 675			return -1;
 676		}
 677
 678		if (json_output)
 679			jsonw_null(json_wtr);
 680	} else if (mode == DUMP_JITED) {
 681		const char *name = NULL;
 682
 683		if (info->ifindex) {
 684			name = ifindex_to_bfd_params(info->ifindex,
 685						     info->netns_dev,
 686						     info->netns_ino,
 687						     &disasm_opt);
 688			if (!name)
 689				return -1;
 690		}
 691
 692		if (info->nr_jited_func_lens && info->jited_func_lens) {
 693			struct kernel_sym *sym = NULL;
 694			struct bpf_func_info *record;
 695			char sym_name[SYM_MAX_NAME];
 696			unsigned char *img = buf;
 697			__u64 *ksyms = NULL;
 698			__u32 *lens;
 699			__u32 i;
 700			if (info->nr_jited_ksyms) {
 701				kernel_syms_load(&dd);
 702				ksyms = u64_to_ptr(info->jited_ksyms);
 703			}
 704
 705			if (json_output)
 706				jsonw_start_array(json_wtr);
 707
 708			lens = u64_to_ptr(info->jited_func_lens);
 709			for (i = 0; i < info->nr_jited_func_lens; i++) {
 710				if (ksyms) {
 711					sym = kernel_syms_search(&dd, ksyms[i]);
 712					if (sym)
 713						sprintf(sym_name, "%s", sym->name);
 714					else
 715						sprintf(sym_name, "0x%016llx", ksyms[i]);
 716				} else {
 717					strcpy(sym_name, "unknown");
 718				}
 719
 720				if (func_info) {
 721					record = func_info + i * info->func_info_rec_size;
 722					btf_dumper_type_only(btf, record->type_id,
 723							     func_sig,
 724							     sizeof(func_sig));
 725				}
 726
 727				if (json_output) {
 728					jsonw_start_object(json_wtr);
 729					if (func_info && func_sig[0] != '\0') {
 730						jsonw_name(json_wtr, "proto");
 731						jsonw_string(json_wtr, func_sig);
 732					}
 733					jsonw_name(json_wtr, "name");
 734					jsonw_string(json_wtr, sym_name);
 735					jsonw_name(json_wtr, "insns");
 736				} else {
 737					if (func_info && func_sig[0] != '\0')
 738						printf("%s:\n", func_sig);
 739					printf("%s:\n", sym_name);
 740				}
 741
 742				disasm_print_insn(img, lens[i], opcodes,
 743						  name, disasm_opt, btf,
 744						  prog_linfo, ksyms[i], i,
 745						  linum);
 746
 747				img += lens[i];
 748
 749				if (json_output)
 750					jsonw_end_object(json_wtr);
 751				else
 752					printf("\n");
 753			}
 754
 755			if (json_output)
 756				jsonw_end_array(json_wtr);
 757		} else {
 758			disasm_print_insn(buf, member_len, opcodes, name,
 759					  disasm_opt, btf, NULL, 0, 0, false);
 760		}
 761	} else if (visual) {
 762		if (json_output)
 763			jsonw_null(json_wtr);
 764		else
 765			dump_xlated_cfg(buf, member_len);
 766	} else {
 767		kernel_syms_load(&dd);
 768		dd.nr_jited_ksyms = info->nr_jited_ksyms;
 769		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
 770		dd.btf = btf;
 771		dd.func_info = func_info;
 772		dd.finfo_rec_size = info->func_info_rec_size;
 773		dd.prog_linfo = prog_linfo;
 774
 775		if (json_output)
 776			dump_xlated_json(&dd, buf, member_len, opcodes,
 777					 linum);
 778		else
 779			dump_xlated_plain(&dd, buf, member_len, opcodes,
 780					  linum);
 781		kernel_syms_destroy(&dd);
 782	}
 783
 784	btf__free(btf);
 785
 786	return 0;
 787}
 788
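    /* Parse "prog dump { xlated | jited }" arguments, fetch program info with
     * the relevant instruction arrays for each matched program, and hand off
     * to prog_dump().
     */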
 789static int do_dump(int argc, char **argv)
 790{
 791	struct bpf_prog_info_linear *info_linear;
 792	char *filepath = NULL;
 793	bool opcodes = false;
 794	bool visual = false;
 795	enum dump_mode mode;
 796	bool linum = false;
 797	int *fds = NULL;
 798	int nb_fds, i = 0;
 799	int err = -1;
 800	__u64 arrays;
 801
 802	if (is_prefix(*argv, "jited")) {
 803		if (disasm_init())
 804			return -1;
 805		mode = DUMP_JITED;
 806	} else if (is_prefix(*argv, "xlated")) {
 807		mode = DUMP_XLATED;
 808	} else {
 809		p_err("expected 'xlated' or 'jited', got: %s", *argv);
 810		return -1;
 811	}
 812	NEXT_ARG();
 813
 814	if (argc < 2)
 815		usage();
 816
 817	fds = malloc(sizeof(int));
 818	if (!fds) {
 819		p_err("mem alloc failed");
 820		return -1;
 821	}
 822	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 823	if (nb_fds < 1)
 824		goto exit_free;
 825
 826	if (is_prefix(*argv, "file")) {
 827		NEXT_ARG();
 828		if (!argc) {
 829			p_err("expected file path");
 830			goto exit_close;
 831		}
 832		if (nb_fds > 1) {
 833			p_err("several programs matched");
 834			goto exit_close;
 835		}
 836
 837		filepath = *argv;
 838		NEXT_ARG();
 839	} else if (is_prefix(*argv, "opcodes")) {
 840		opcodes = true;
 841		NEXT_ARG();
 842	} else if (is_prefix(*argv, "visual")) {
 843		if (nb_fds > 1) {
 844			p_err("several programs matched");
 845			goto exit_close;
 846		}
 847
 848		visual = true;
 849		NEXT_ARG();
 850	} else if (is_prefix(*argv, "linum")) {
 851		linum = true;
 852		NEXT_ARG();
 853	}
 854
 855	if (argc) {
 856		usage();
 857		goto exit_close;
 858	}
 859
 860	if (mode == DUMP_JITED)
 861		arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
 862	else
 863		arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
 864
 865	arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
 866	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
 867	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
 868	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
 869	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
 870
 871	if (json_output && nb_fds > 1)
 872		jsonw_start_array(json_wtr);	/* root array */
 873	for (i = 0; i < nb_fds; i++) {
 874		info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
 875		if (IS_ERR_OR_NULL(info_linear)) {
 876			p_err("can't get prog info: %s", strerror(errno));
 877			break;
 878		}
 879
 880		if (json_output && nb_fds > 1) {
 881			jsonw_start_object(json_wtr);	/* prog object */
 882			print_prog_header_json(&info_linear->info);
 883			jsonw_name(json_wtr, "insns");
 884		} else if (nb_fds > 1) {
 885			print_prog_header_plain(&info_linear->info);
 886		}
 887
 888		err = prog_dump(&info_linear->info, mode, filepath, opcodes,
 889				visual, linum);
 890
 891		if (json_output && nb_fds > 1)
 892			jsonw_end_object(json_wtr);	/* prog object */
 893		else if (i != nb_fds - 1 && nb_fds > 1)
 894			printf("\n");
 895
 896		free(info_linear);
 897		if (err)
 898			break;
 899		close(fds[i]);
 900	}
 901	if (json_output && nb_fds > 1)
 902		jsonw_end_array(json_wtr);	/* root array */
 903
 904exit_close:
 905	for (; i < nb_fds; i++)
 906		close(fds[i]);
 907exit_free:
 908	free(fds);
 909	return err;
 910}
 911
 912static int do_pin(int argc, char **argv)
 913{
 914	int err;
 915
 916	err = do_pin_any(argc, argv, prog_parse_fd);
 917	if (!err && json_output)
 918		jsonw_null(json_wtr);
 919	return err;
 920}
 921
 922struct map_replace {
 923	int idx;
 924	int fd;
 925	char *name;
 926};
 927
 928static int map_replace_compar(const void *p1, const void *p2)
 929{
 930	const struct map_replace *a = p1, *b = p2;
 931
 932	return a->idx - b->idx;
 933}
 934
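    /* Parse "PROG ATTACH_TYPE [MAP]" for prog attach/detach. The
     * flow_dissector attach type does not take a map argument.
     */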
 935static int parse_attach_detach_args(int argc, char **argv, int *progfd,
 936				    enum bpf_attach_type *attach_type,
 937				    int *mapfd)
 938{
 939	if (!REQ_ARGS(3))
 940		return -EINVAL;
 941
 942	*progfd = prog_parse_fd(&argc, &argv);
 943	if (*progfd < 0)
 944		return *progfd;
 945
 946	*attach_type = parse_attach_type(*argv);
 947	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
 948		p_err("invalid attach/detach type");
 949		return -EINVAL;
 950	}
 951
 952	if (*attach_type == BPF_FLOW_DISSECTOR) {
 953		*mapfd = 0;
 954		return 0;
 955	}
 956
 957	NEXT_ARG();
 958	if (!REQ_ARGS(2))
 959		return -EINVAL;
 960
 961	*mapfd = map_parse_fd(&argc, &argv);
 962	if (*mapfd < 0)
 963		return *mapfd;
 964
 965	return 0;
 966}
 967
 968static int do_attach(int argc, char **argv)
 969{
 970	enum bpf_attach_type attach_type;
 971	int err, progfd;
 972	int mapfd;
 973
 974	err = parse_attach_detach_args(argc, argv,
 975				       &progfd, &attach_type, &mapfd);
 976	if (err)
 977		return err;
 978
 979	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
 980	if (err) {
 981		p_err("failed prog attach to map");
 982		return -EINVAL;
 983	}
 984
 985	if (json_output)
 986		jsonw_null(json_wtr);
 987	return 0;
 988}
 989
 990static int do_detach(int argc, char **argv)
 991{
 992	enum bpf_attach_type attach_type;
 993	int err, progfd;
 994	int mapfd;
 995
 996	err = parse_attach_detach_args(argc, argv,
 997				       &progfd, &attach_type, &mapfd);
 998	if (err)
 999		return err;
1000
1001	err = bpf_prog_detach2(progfd, mapfd, attach_type);
1002	if (err) {
1003		p_err("failed prog detach from map");
1004		return -EINVAL;
1005	}
1006
1007	if (json_output)
1008		jsonw_null(json_wtr);
1009	return 0;
1010}
1011
1012static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1013{
1014	if (file_data_in && file_ctx_in &&
1015	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1016		p_err("cannot use standard input for both data_in and ctx_in");
1017		return -1;
1018	}
1019
1020	return 0;
1021}
1022
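    /* Read test-run input (data_in or ctx_in) from a file, or from stdin when
     * the name is "-", into a dynamically grown buffer returned through
     * data_ptr along with its size.
     */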
1023static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1024{
1025	size_t block_size = 256;
1026	size_t buf_size = block_size;
1027	size_t nb_read = 0;
1028	void *tmp;
1029	FILE *f;
1030
1031	if (!fname) {
1032		*data_ptr = NULL;
1033		*size = 0;
1034		return 0;
1035	}
1036
1037	if (!strcmp(fname, "-"))
1038		f = stdin;
1039	else
1040		f = fopen(fname, "r");
1041	if (!f) {
1042		p_err("failed to open %s: %s", fname, strerror(errno));
1043		return -1;
1044	}
1045
1046	*data_ptr = malloc(block_size);
1047	if (!*data_ptr) {
1048		p_err("failed to allocate memory for data_in/ctx_in: %s",
1049		      strerror(errno));
1050		goto err_fclose;
1051	}
1052
1053	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1054		if (feof(f))
1055			break;
1056		if (ferror(f)) {
1057			p_err("failed to read data_in/ctx_in from %s: %s",
1058			      fname, strerror(errno));
1059			goto err_free;
1060		}
1061		if (nb_read > buf_size - block_size) {
1062			if (buf_size == UINT32_MAX) {
1063				p_err("data_in/ctx_in is too long (max: %d)",
1064				      UINT32_MAX);
1065				goto err_free;
1066			}
1067			/* No space for fread()-ing next chunk; realloc() */
1068			buf_size *= 2;
1069			tmp = realloc(*data_ptr, buf_size);
1070			if (!tmp) {
1071				p_err("failed to reallocate data_in/ctx_in: %s",
1072				      strerror(errno));
1073				goto err_free;
1074			}
1075			*data_ptr = tmp;
1076		}
1077	}
1078	if (f != stdin)
1079		fclose(f);
1080
1081	*size = nb_read;
1082	return 0;
1083
1084err_free:
1085	free(*data_ptr);
1086	*data_ptr = NULL;
1087err_fclose:
1088	if (f != stdin)
1089		fclose(f);
1090	return -1;
1091}
1092
1093static void hex_print(void *data, unsigned int size, FILE *f)
1094{
1095	size_t i, j;
1096	char c;
1097
1098	for (i = 0; i < size; i += 16) {
1099		/* Row offset */
1100		fprintf(f, "%07zx\t", i);
1101
1102		/* Hexadecimal values */
1103		for (j = i; j < i + 16 && j < size; j++)
1104			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1105				j % 2 ? " " : "");
1106		for (; j < i + 16; j++)
1107			fprintf(f, "  %s", j % 2 ? " " : "");
1108
1109		/* ASCII values (if relevant), '.' otherwise */
1110		fprintf(f, "| ");
1111		for (j = i; j < i + 16 && j < size; j++) {
1112			c = *(char *)(data + j);
1113			if (c < ' ' || c > '~')
1114				c = '.';
1115			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1116		}
1117
1118		fprintf(f, "\n");
1119	}
1120}
1121
1122static int
1123print_run_output(void *data, unsigned int size, const char *fname,
1124		 const char *json_key)
1125{
1126	size_t nb_written;
1127	FILE *f;
1128
1129	if (!fname)
1130		return 0;
1131
1132	if (!strcmp(fname, "-")) {
1133		f = stdout;
1134		if (json_output) {
1135			jsonw_name(json_wtr, json_key);
1136			print_data_json(data, size);
1137		} else {
1138			hex_print(data, size, f);
1139		}
1140		return 0;
1141	}
1142
1143	f = fopen(fname, "w");
1144	if (!f) {
1145		p_err("failed to open %s: %s", fname, strerror(errno));
1146		return -1;
1147	}
1148
1149	nb_written = fwrite(data, 1, size, f);
1150	fclose(f);
1151	if (nb_written != size) {
1152		p_err("failed to write output data/ctx: %s", strerror(errno));
1153		return -1;
1154	}
1155
1156	return 0;
1157}
1158
1159static int alloc_run_data(void **data_ptr, unsigned int size_out)
1160{
1161	*data_ptr = calloc(size_out, 1);
1162	if (!*data_ptr) {
1163		p_err("failed to allocate memory for output data/ctx: %s",
1164		      strerror(errno));
1165		return -1;
1166	}
1167
1168	return 0;
1169}
1170
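    /* Implement "prog run": collect input/output buffers and options, run the
     * program through bpf_prog_test_run_xattr(), then print the resulting
     * data, context, return value and duration.
     */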
1171static int do_run(int argc, char **argv)
1172{
1173	char *data_fname_in = NULL, *data_fname_out = NULL;
1174	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
1175	struct bpf_prog_test_run_attr test_attr = {0};
1176	const unsigned int default_size = SZ_32K;
1177	void *data_in = NULL, *data_out = NULL;
1178	void *ctx_in = NULL, *ctx_out = NULL;
1179	unsigned int repeat = 1;
1180	int fd, err;
1181
1182	if (!REQ_ARGS(4))
1183		return -1;
1184
1185	fd = prog_parse_fd(&argc, &argv);
1186	if (fd < 0)
1187		return -1;
1188
1189	while (argc) {
1190		if (detect_common_prefix(*argv, "data_in", "data_out",
1191					 "data_size_out", NULL))
1192			return -1;
1193		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1194					 "ctx_size_out", NULL))
1195			return -1;
1196
1197		if (is_prefix(*argv, "data_in")) {
1198			NEXT_ARG();
1199			if (!REQ_ARGS(1))
1200				return -1;
1201
1202			data_fname_in = GET_ARG();
1203			if (check_single_stdin(data_fname_in, ctx_fname_in))
1204				return -1;
1205		} else if (is_prefix(*argv, "data_out")) {
1206			NEXT_ARG();
1207			if (!REQ_ARGS(1))
1208				return -1;
1209
1210			data_fname_out = GET_ARG();
1211		} else if (is_prefix(*argv, "data_size_out")) {
1212			char *endptr;
1213
1214			NEXT_ARG();
1215			if (!REQ_ARGS(1))
1216				return -1;
1217
1218			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1219			if (*endptr) {
1220				p_err("can't parse %s as output data size",
1221				      *argv);
1222				return -1;
1223			}
1224			NEXT_ARG();
1225		} else if (is_prefix(*argv, "ctx_in")) {
1226			NEXT_ARG();
1227			if (!REQ_ARGS(1))
1228				return -1;
1229
1230			ctx_fname_in = GET_ARG();
1231			if (check_single_stdin(data_fname_in, ctx_fname_in))
1232				return -1;
1233		} else if (is_prefix(*argv, "ctx_out")) {
1234			NEXT_ARG();
1235			if (!REQ_ARGS(1))
1236				return -1;
1237
1238			ctx_fname_out = GET_ARG();
1239		} else if (is_prefix(*argv, "ctx_size_out")) {
1240			char *endptr;
1241
1242			NEXT_ARG();
1243			if (!REQ_ARGS(1))
1244				return -1;
1245
1246			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1247			if (*endptr) {
1248				p_err("can't parse %s as output context size",
1249				      *argv);
1250				return -1;
1251			}
1252			NEXT_ARG();
1253		} else if (is_prefix(*argv, "repeat")) {
1254			char *endptr;
1255
1256			NEXT_ARG();
1257			if (!REQ_ARGS(1))
1258				return -1;
1259
1260			repeat = strtoul(*argv, &endptr, 0);
1261			if (*endptr) {
1262				p_err("can't parse %s as repeat number",
1263				      *argv);
1264				return -1;
1265			}
1266			NEXT_ARG();
1267		} else {
1268			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1269			      *argv);
1270			return -1;
1271		}
1272	}
1273
1274	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1275	if (err)
1276		return -1;
1277
1278	if (data_in) {
1279		if (!test_attr.data_size_out)
1280			test_attr.data_size_out = default_size;
1281		err = alloc_run_data(&data_out, test_attr.data_size_out);
1282		if (err)
1283			goto free_data_in;
1284	}
1285
1286	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1287	if (err)
1288		goto free_data_out;
1289
1290	if (ctx_in) {
1291		if (!test_attr.ctx_size_out)
1292			test_attr.ctx_size_out = default_size;
1293		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1294		if (err)
1295			goto free_ctx_in;
1296	}
1297
1298	test_attr.prog_fd	= fd;
1299	test_attr.repeat	= repeat;
1300	test_attr.data_in	= data_in;
1301	test_attr.data_out	= data_out;
1302	test_attr.ctx_in	= ctx_in;
1303	test_attr.ctx_out	= ctx_out;
1304
1305	err = bpf_prog_test_run_xattr(&test_attr);
1306	if (err) {
1307		p_err("failed to run program: %s", strerror(errno));
1308		goto free_ctx_out;
1309	}
1310
1311	err = 0;
1312
1313	if (json_output)
1314		jsonw_start_object(json_wtr);	/* root */
1315
1316	/* Do not exit on errors occurring when printing output data/context,
1317	 * we still want to print return value and duration for program run.
1318	 */
1319	if (test_attr.data_size_out)
1320		err += print_run_output(test_attr.data_out,
1321					test_attr.data_size_out,
1322					data_fname_out, "data_out");
1323	if (test_attr.ctx_size_out)
1324		err += print_run_output(test_attr.ctx_out,
1325					test_attr.ctx_size_out,
1326					ctx_fname_out, "ctx_out");
1327
1328	if (json_output) {
1329		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1330		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1331		jsonw_end_object(json_wtr);	/* root */
1332	} else {
1333		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1334			test_attr.retval,
1335			repeat > 1 ? " (average)" : "", test_attr.duration);
1336	}
1337
1338free_ctx_out:
1339	free(ctx_out);
1340free_ctx_in:
1341	free(ctx_in);
1342free_data_out:
1343	free(data_out);
1344free_data_in:
1345	free(data_in);
1346
1347	return err;
1348}
1349
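    /* Wrapper around libbpf_prog_type_by_name() which, on failure, retries
     * with debug-level libbpf prints so the user can see why the section name
     * was rejected.
     */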
1350static int
1351get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1352		      enum bpf_attach_type *expected_attach_type)
1353{
1354	libbpf_print_fn_t print_backup;
1355	int ret;
1356
1357	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1358	if (!ret)
1359		return ret;
1360
1361	/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1362	print_backup = libbpf_set_print(print_all_levels);
1363	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1364	libbpf_set_print(print_backup);
1365
1366	return ret;
1367}
1368
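    /* Implement "prog load" and "prog loadall": open the object file, apply
     * the type/map/dev options, load it, then pin the first program (load) or
     * all programs (loadall), optionally pinning the maps as well.
     */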
1369static int load_with_options(int argc, char **argv, bool first_prog_only)
1370{
1371	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1372	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1373		.relaxed_maps = relaxed_maps,
1374	);
1375	struct bpf_object_load_attr load_attr = { 0 };
1376	enum bpf_attach_type expected_attach_type;
1377	struct map_replace *map_replace = NULL;
1378	struct bpf_program *prog = NULL, *pos;
1379	unsigned int old_map_fds = 0;
1380	const char *pinmaps = NULL;
1381	struct bpf_object *obj;
1382	struct bpf_map *map;
1383	const char *pinfile;
1384	unsigned int i, j;
1385	__u32 ifindex = 0;
1386	const char *file;
1387	int idx, err;
1388
1389
1390	if (!REQ_ARGS(2))
1391		return -1;
1392	file = GET_ARG();
1393	pinfile = GET_ARG();
1394
1395	while (argc) {
1396		if (is_prefix(*argv, "type")) {
1397			char *type;
1398
1399			NEXT_ARG();
1400
1401			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1402				p_err("program type already specified");
1403				goto err_free_reuse_maps;
1404			}
1405			if (!REQ_ARGS(1))
1406				goto err_free_reuse_maps;
1407
1408			/* Put a '/' at the end of type to appease libbpf */
1409			type = malloc(strlen(*argv) + 2);
1410			if (!type) {
1411				p_err("mem alloc failed");
1412				goto err_free_reuse_maps;
1413			}
1414			*type = 0;
1415			strcat(type, *argv);
1416			strcat(type, "/");
1417
1418			err = get_prog_type_by_name(type, &common_prog_type,
1419						    &expected_attach_type);
1420			free(type);
1421			if (err < 0)
1422				goto err_free_reuse_maps;
1423
1424			NEXT_ARG();
1425		} else if (is_prefix(*argv, "map")) {
1426			void *new_map_replace;
1427			char *endptr, *name;
1428			int fd;
1429
1430			NEXT_ARG();
1431
1432			if (!REQ_ARGS(4))
1433				goto err_free_reuse_maps;
1434
1435			if (is_prefix(*argv, "idx")) {
1436				NEXT_ARG();
1437
1438				idx = strtoul(*argv, &endptr, 0);
1439				if (*endptr) {
1440					p_err("can't parse %s as IDX", *argv);
1441					goto err_free_reuse_maps;
1442				}
1443				name = NULL;
1444			} else if (is_prefix(*argv, "name")) {
1445				NEXT_ARG();
1446
1447				name = *argv;
1448				idx = -1;
1449			} else {
1450				p_err("expected 'idx' or 'name', got: '%s'?",
1451				      *argv);
1452				goto err_free_reuse_maps;
1453			}
1454			NEXT_ARG();
1455
1456			fd = map_parse_fd(&argc, &argv);
1457			if (fd < 0)
1458				goto err_free_reuse_maps;
1459
1460			new_map_replace = reallocarray(map_replace,
1461						       old_map_fds + 1,
1462						       sizeof(*map_replace));
1463			if (!new_map_replace) {
1464				p_err("mem alloc failed");
1465				goto err_free_reuse_maps;
1466			}
1467			map_replace = new_map_replace;
1468
1469			map_replace[old_map_fds].idx = idx;
1470			map_replace[old_map_fds].name = name;
1471			map_replace[old_map_fds].fd = fd;
1472			old_map_fds++;
1473		} else if (is_prefix(*argv, "dev")) {
1474			NEXT_ARG();
1475
1476			if (ifindex) {
1477				p_err("offload device already specified");
1478				goto err_free_reuse_maps;
1479			}
1480			if (!REQ_ARGS(1))
1481				goto err_free_reuse_maps;
1482
1483			ifindex = if_nametoindex(*argv);
1484			if (!ifindex) {
1485				p_err("unrecognized netdevice '%s': %s",
1486				      *argv, strerror(errno));
1487				goto err_free_reuse_maps;
1488			}
1489			NEXT_ARG();
1490		} else if (is_prefix(*argv, "pinmaps")) {
1491			NEXT_ARG();
1492
1493			if (!REQ_ARGS(1))
1494				goto err_free_reuse_maps;
1495
1496			pinmaps = GET_ARG();
1497		} else {
1498			p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
1499			      *argv);
1500			goto err_free_reuse_maps;
1501		}
1502	}
1503
1504	set_max_rlimit();
1505
1506	obj = bpf_object__open_file(file, &open_opts);
1507	if (libbpf_get_error(obj)) {
1508		p_err("failed to open object file");
1509		goto err_free_reuse_maps;
1510	}
1511
1512	bpf_object__for_each_program(pos, obj) {
1513		enum bpf_prog_type prog_type = common_prog_type;
1514
1515		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1516			const char *sec_name = bpf_program__section_name(pos);
1517
1518			err = get_prog_type_by_name(sec_name, &prog_type,
1519						    &expected_attach_type);
1520			if (err < 0)
1521				goto err_close_obj;
1522		}
1523
1524		bpf_program__set_ifindex(pos, ifindex);
1525		bpf_program__set_type(pos, prog_type);
1526		bpf_program__set_expected_attach_type(pos, expected_attach_type);
1527	}
1528
1529	qsort(map_replace, old_map_fds, sizeof(*map_replace),
1530	      map_replace_compar);
1531
 1532	/* After the sort, maps by name will be first on the list, because they
1533	 * have idx == -1.  Resolve them.
1534	 */
1535	j = 0;
1536	while (j < old_map_fds && map_replace[j].name) {
1537		i = 0;
1538		bpf_object__for_each_map(map, obj) {
1539			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1540				map_replace[j].idx = i;
1541				break;
1542			}
1543			i++;
1544		}
1545		if (map_replace[j].idx == -1) {
1546			p_err("unable to find map '%s'", map_replace[j].name);
1547			goto err_close_obj;
1548		}
1549		j++;
1550	}
1551	/* Resort if any names were resolved */
1552	if (j)
1553		qsort(map_replace, old_map_fds, sizeof(*map_replace),
1554		      map_replace_compar);
1555
1556	/* Set ifindex and name reuse */
1557	j = 0;
1558	idx = 0;
1559	bpf_object__for_each_map(map, obj) {
1560		if (!bpf_map__is_offload_neutral(map))
1561			bpf_map__set_ifindex(map, ifindex);
1562
1563		if (j < old_map_fds && idx == map_replace[j].idx) {
1564			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1565			if (err) {
1566				p_err("unable to set up map reuse: %d", err);
1567				goto err_close_obj;
1568			}
1569
1570			/* Next reuse wants to apply to the same map */
1571			if (j < old_map_fds && map_replace[j].idx == idx) {
1572				p_err("replacement for map idx %d specified more than once",
1573				      idx);
1574				goto err_close_obj;
1575			}
1576		}
1577
1578		idx++;
1579	}
1580	if (j < old_map_fds) {
1581		p_err("map idx '%d' not used", map_replace[j].idx);
1582		goto err_close_obj;
1583	}
1584
1585	load_attr.obj = obj;
1586	if (verifier_logs)
1587		/* log_level1 + log_level2 + stats, but not stable UAPI */
1588		load_attr.log_level = 1 + 2 + 4;
1589
1590	err = bpf_object__load_xattr(&load_attr);
1591	if (err) {
1592		p_err("failed to load object file");
1593		goto err_close_obj;
1594	}
1595
1596	err = mount_bpffs_for_pin(pinfile);
1597	if (err)
1598		goto err_close_obj;
1599
1600	if (first_prog_only) {
1601		prog = bpf_program__next(NULL, obj);
1602		if (!prog) {
1603			p_err("object file doesn't contain any bpf program");
1604			goto err_close_obj;
1605		}
1606
1607		err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1608		if (err) {
1609			p_err("failed to pin program %s",
1610			      bpf_program__section_name(prog));
1611			goto err_close_obj;
1612		}
1613	} else {
1614		err = bpf_object__pin_programs(obj, pinfile);
1615		if (err) {
1616			p_err("failed to pin all programs");
1617			goto err_close_obj;
1618		}
1619	}
1620
1621	if (pinmaps) {
1622		err = bpf_object__pin_maps(obj, pinmaps);
1623		if (err) {
1624			p_err("failed to pin all maps");
1625			goto err_unpin;
1626		}
1627	}
1628
1629	if (json_output)
1630		jsonw_null(json_wtr);
1631
1632	bpf_object__close(obj);
1633	for (i = 0; i < old_map_fds; i++)
1634		close(map_replace[i].fd);
1635	free(map_replace);
1636
1637	return 0;
1638
1639err_unpin:
1640	if (first_prog_only)
1641		unlink(pinfile);
1642	else
1643		bpf_object__unpin_programs(obj, pinfile);
1644err_close_obj:
1645	bpf_object__close(obj);
1646err_free_reuse_maps:
1647	for (i = 0; i < old_map_fds; i++)
1648		close(map_replace[i].fd);
1649	free(map_replace);
1650	return -1;
1651}
1652
1653static int count_open_fds(void)
1654{
1655	DIR *dp = opendir("/proc/self/fd");
1656	struct dirent *de;
1657	int cnt = -3;
1658
1659	if (!dp)
1660		return -1;
1661
1662	while ((de = readdir(dp)))
1663		cnt++;
1664
1665	closedir(dp);
1666	return cnt;
1667}
1668
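    /* Run the "light skeleton" loader program generated by
     * bpf_object__gen_loader(), and report the verifier log and any leaked
     * file descriptors if it fails.
     */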
1669static int try_loader(struct gen_loader_opts *gen)
1670{
1671	struct bpf_load_and_run_opts opts = {};
1672	struct bpf_loader_ctx *ctx;
1673	int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
1674					     sizeof(struct bpf_prog_desc));
1675	int log_buf_sz = (1u << 24) - 1;
1676	int err, fds_before, fd_delta;
1677	char *log_buf;
1678
1679	ctx = alloca(ctx_sz);
1680	memset(ctx, 0, ctx_sz);
1681	ctx->sz = ctx_sz;
1682	ctx->log_level = 1;
1683	ctx->log_size = log_buf_sz;
1684	log_buf = malloc(log_buf_sz);
1685	if (!log_buf)
1686		return -ENOMEM;
1687	ctx->log_buf = (long) log_buf;
1688	opts.ctx = ctx;
1689	opts.data = gen->data;
1690	opts.data_sz = gen->data_sz;
1691	opts.insns = gen->insns;
1692	opts.insns_sz = gen->insns_sz;
1693	fds_before = count_open_fds();
1694	err = bpf_load_and_run(&opts);
1695	fd_delta = count_open_fds() - fds_before;
1696	if (err < 0) {
1697		fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
1698		if (fd_delta)
1699			fprintf(stderr, "loader prog leaked %d FDs\n",
1700				fd_delta);
1701	}
1702	free(log_buf);
1703	return err;
1704}
1705
1706static int do_loader(int argc, char **argv)
1707{
1708	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
1709	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
1710	struct bpf_object_load_attr load_attr = {};
1711	struct bpf_object *obj;
1712	const char *file;
1713	int err = 0;
1714
1715	if (!REQ_ARGS(1))
1716		return -1;
1717	file = GET_ARG();
1718
1719	obj = bpf_object__open_file(file, &open_opts);
1720	if (libbpf_get_error(obj)) {
1721		p_err("failed to open object file");
1722		goto err_close_obj;
1723	}
1724
1725	err = bpf_object__gen_loader(obj, &gen);
1726	if (err)
1727		goto err_close_obj;
1728
1729	load_attr.obj = obj;
1730	if (verifier_logs)
1731		/* log_level1 + log_level2 + stats, but not stable UAPI */
1732		load_attr.log_level = 1 + 2 + 4;
1733
1734	err = bpf_object__load_xattr(&load_attr);
1735	if (err) {
1736		p_err("failed to load object file");
1737		goto err_close_obj;
1738	}
1739
1740	if (verifier_logs) {
1741		struct dump_data dd = {};
1742
1743		kernel_syms_load(&dd);
1744		dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
1745		kernel_syms_destroy(&dd);
1746	}
1747	err = try_loader(&gen);
1748err_close_obj:
1749	bpf_object__close(obj);
1750	return err;
1751}
1752
1753static int do_load(int argc, char **argv)
1754{
1755	if (use_loader)
1756		return do_loader(argc, argv);
1757	return load_with_options(argc, argv, true);
1758}
1759
1760static int do_loadall(int argc, char **argv)
1761{
1762	return load_with_options(argc, argv, false);
1763}
1764
1765#ifdef BPFTOOL_WITHOUT_SKELETONS
1766
1767static int do_profile(int argc, char **argv)
1768{
1769	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1770	return 0;
1771}
1772
1773#else /* BPFTOOL_WITHOUT_SKELETONS */
1774
1775#include "profiler.skel.h"
1776
1777struct profile_metric {
1778	const char *name;
1779	struct bpf_perf_event_value val;
1780	struct perf_event_attr attr;
1781	bool selected;
1782
1783	/* calculate ratios like instructions per cycle */
1784	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1785	const char *ratio_desc;
1786	const float ratio_mul;
1787} metrics[] = {
1788	{
1789		.name = "cycles",
1790		.attr = {
1791			.type = PERF_TYPE_HARDWARE,
1792			.config = PERF_COUNT_HW_CPU_CYCLES,
1793			.exclude_user = 1,
1794		},
1795	},
1796	{
1797		.name = "instructions",
1798		.attr = {
1799			.type = PERF_TYPE_HARDWARE,
1800			.config = PERF_COUNT_HW_INSTRUCTIONS,
1801			.exclude_user = 1,
1802		},
1803		.ratio_metric = 1,
1804		.ratio_desc = "insns per cycle",
1805		.ratio_mul = 1.0,
1806	},
1807	{
1808		.name = "l1d_loads",
1809		.attr = {
1810			.type = PERF_TYPE_HW_CACHE,
1811			.config =
1812				PERF_COUNT_HW_CACHE_L1D |
1813				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1814				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
1815			.exclude_user = 1,
1816		},
1817	},
1818	{
1819		.name = "llc_misses",
1820		.attr = {
1821			.type = PERF_TYPE_HW_CACHE,
1822			.config =
1823				PERF_COUNT_HW_CACHE_LL |
1824				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1825				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1826			.exclude_user = 1
1827		},
1828		.ratio_metric = 2,
1829		.ratio_desc = "LLC misses per million insns",
1830		.ratio_mul = 1e6,
1831	},
1832	{
1833		.name = "itlb_misses",
1834		.attr = {
1835			.type = PERF_TYPE_HW_CACHE,
1836			.config =
1837				PERF_COUNT_HW_CACHE_ITLB |
1838				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1839				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1840			.exclude_user = 1
1841		},
1842		.ratio_metric = 2,
1843		.ratio_desc = "itlb misses per million insns",
1844		.ratio_mul = 1e6,
1845	},
1846	{
1847		.name = "dtlb_misses",
1848		.attr = {
1849			.type = PERF_TYPE_HW_CACHE,
1850			.config =
1851				PERF_COUNT_HW_CACHE_DTLB |
1852				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1853				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1854			.exclude_user = 1
1855		},
1856		.ratio_metric = 2,
1857		.ratio_desc = "dtlb misses per million insns",
1858		.ratio_mul = 1e6,
1859	},
1860};
1861
1862static __u64 profile_total_count;
1863
1864#define MAX_NUM_PROFILE_METRICS 4
1865
1866static int profile_parse_metrics(int argc, char **argv)
1867{
1868	unsigned int metric_cnt;
1869	int selected_cnt = 0;
1870	unsigned int i;
1871
1872	metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
1873
1874	while (argc > 0) {
1875		for (i = 0; i < metric_cnt; i++) {
1876			if (is_prefix(argv[0], metrics[i].name)) {
1877				if (!metrics[i].selected)
1878					selected_cnt++;
1879				metrics[i].selected = true;
1880				break;
1881			}
1882		}
1883		if (i == metric_cnt) {
1884			p_err("unknown metric %s", argv[0]);
1885			return -1;
1886		}
1887		NEXT_ARG();
1888	}
1889	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
 1890		p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
1891		      selected_cnt, MAX_NUM_PROFILE_METRICS);
1892		return -1;
1893	}
1894	return selected_cnt;
1895}
1896
1897static void profile_read_values(struct profiler_bpf *obj)
1898{
1899	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
1900	int reading_map_fd, count_map_fd;
1901	__u64 counts[num_cpu];
1902	__u32 key = 0;
1903	int err;
1904
1905	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
1906	count_map_fd = bpf_map__fd(obj->maps.counts);
1907	if (reading_map_fd < 0 || count_map_fd < 0) {
1908		p_err("failed to get fd for map");
1909		return;
1910	}
1911
1912	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
1913	if (err) {
1914		p_err("failed to read count_map: %s", strerror(errno));
1915		return;
1916	}
1917
1918	profile_total_count = 0;
1919	for (cpu = 0; cpu < num_cpu; cpu++)
1920		profile_total_count += counts[cpu];
1921
1922	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1923		struct bpf_perf_event_value values[num_cpu];
1924
1925		if (!metrics[m].selected)
1926			continue;
1927
1928		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
1929		if (err) {
1930			p_err("failed to read reading_map: %s",
1931			      strerror(errno));
1932			return;
1933		}
1934		for (cpu = 0; cpu < num_cpu; cpu++) {
1935			metrics[m].val.counter += values[cpu].counter;
1936			metrics[m].val.enabled += values[cpu].enabled;
1937			metrics[m].val.running += values[cpu].running;
1938		}
1939		key++;
1940	}
1941}
1942
1943static void profile_print_readings_json(void)
1944{
1945	__u32 m;
1946
1947	jsonw_start_array(json_wtr);
1948	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1949		if (!metrics[m].selected)
1950			continue;
1951		jsonw_start_object(json_wtr);
1952		jsonw_string_field(json_wtr, "metric", metrics[m].name);
1953		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
1954		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
1955		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
1956		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
1957
1958		jsonw_end_object(json_wtr);
1959	}
1960	jsonw_end_array(json_wtr);
1961}
1962
1963static void profile_print_readings_plain(void)
1964{
1965	__u32 m;
1966
1967	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
1968	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1969		struct bpf_perf_event_value *val = &metrics[m].val;
1970		int r;
1971
1972		if (!metrics[m].selected)
1973			continue;
1974		printf("%18llu %-20s", val->counter, metrics[m].name);
1975
1976		r = metrics[m].ratio_metric - 1;
1977		if (r >= 0 && metrics[r].selected &&
1978		    metrics[r].val.counter > 0) {
1979			printf("# %8.2f %-30s",
1980			       val->counter * metrics[m].ratio_mul /
1981			       metrics[r].val.counter,
1982			       metrics[m].ratio_desc);
1983		} else {
1984			printf("%-41s", "");
1985		}
1986
1987		if (val->enabled > val->running)
1988			printf("(%4.2f%%)",
1989			       val->running * 100.0 / val->enabled);
1990		printf("\n");
1991	}
1992}
1993
1994static void profile_print_readings(void)
1995{
1996	if (json_output)
1997		profile_print_readings_json();
1998	else
1999		profile_print_readings_plain();
2000}
2001
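    /* Resolve the name of the profiled program's first function from its BTF
     * func_info, for use as the fentry/fexit attach target.
     */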
2002static char *profile_target_name(int tgt_fd)
2003{
2004	struct bpf_prog_info_linear *info_linear;
2005	struct bpf_func_info *func_info;
2006	const struct btf_type *t;
2007	struct btf *btf = NULL;
2008	char *name = NULL;
2009
2010	info_linear = bpf_program__get_prog_info_linear(
2011		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
2012	if (IS_ERR_OR_NULL(info_linear)) {
2013		p_err("failed to get info_linear for prog FD %d", tgt_fd);
2014		return NULL;
2015	}
2016
2017	if (info_linear->info.btf_id == 0 ||
2018	    btf__get_from_id(info_linear->info.btf_id, &btf)) {
2019		p_err("prog FD %d doesn't have valid btf", tgt_fd);
2020		goto out;
2021	}
2022
2023	func_info = u64_to_ptr(info_linear->info.func_info);
2024	t = btf__type_by_id(btf, func_info[0].type_id);
2025	if (!t) {
2026		p_err("btf %d doesn't have type %d",
2027		      info_linear->info.btf_id, func_info[0].type_id);
2028		goto out;
2029	}
2030	name = strdup(btf__name_by_offset(btf, t->name_off));
2031out:
2032	btf__free(btf);
2033	free(info_linear);
2034	return name;
2035}
2036
2037static struct profiler_bpf *profile_obj;
2038static int profile_tgt_fd = -1;
2039static char *profile_tgt_name;
2040static int *profile_perf_events;
2041static int profile_perf_event_cnt;
2042
2043static void profile_close_perf_events(struct profiler_bpf *obj)
2044{
2045	int i;
2046
2047	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
2048		close(profile_perf_events[i]);
2049
2050	free(profile_perf_events);
2051	profile_perf_event_cnt = 0;
2052}
2053
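    /* Open one perf event per selected metric and per CPU, store the file
     * descriptors in the profiler's "events" map and enable the counters.
     */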
2054static int profile_open_perf_events(struct profiler_bpf *obj)
2055{
2056	unsigned int cpu, m;
2057	int map_fd, pmu_fd;
2058
2059	profile_perf_events = calloc(
2060		sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
2061	if (!profile_perf_events) {
2062		p_err("failed to allocate memory for perf_event array: %s",
2063		      strerror(errno));
2064		return -1;
2065	}
2066	map_fd = bpf_map__fd(obj->maps.events);
2067	if (map_fd < 0) {
2068		p_err("failed to get fd for events map");
2069		return -1;
2070	}
2071
2072	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2073		if (!metrics[m].selected)
2074			continue;
2075		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
2076			pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
2077					 -1/*pid*/, cpu, -1/*group_fd*/, 0);
2078			if (pmu_fd < 0 ||
2079			    bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
2080						&pmu_fd, BPF_ANY) ||
2081			    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
2082				p_err("failed to create event %s on cpu %d",
2083				      metrics[m].name, cpu);
2084				return -1;
2085			}
2086			profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
2087		}
2088	}
2089	return 0;
2090}
2091
2092static void profile_print_and_cleanup(void)
2093{
2094	profile_close_perf_events(profile_obj);
2095	profile_read_values(profile_obj);
2096	profile_print_readings();
2097	profiler_bpf__destroy(profile_obj);
2098
2099	close(profile_tgt_fd);
2100	free(profile_tgt_name);
2101}
2102
2103static void int_exit(int signo)
2104{
2105	profile_print_and_cleanup();
2106	exit(0);
2107}
2108
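/* Entry point for "bpftool prog profile": parse the target program, the
 * optional duration and the requested metrics, size and load the profiler
 * skeleton with its programs retargeted at the profiled program, open the
 * perf events, attach, then sleep for the duration (or until SIGINT) before
 * printing the readings.
 */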
2109static int do_profile(int argc, char **argv)
2110{
2111	int num_metric, num_cpu, err = -1;
2112	struct bpf_program *prog;
2113	unsigned long duration;
2114	char *endptr;
2115
2116	/* we need at least two args for the prog spec and one for a metric */
2117	if (!REQ_ARGS(3))
2118		return -EINVAL;
2119
2120	/* parse target fd */
2121	profile_tgt_fd = prog_parse_fd(&argc, &argv);
2122	if (profile_tgt_fd < 0) {
2123		p_err("failed to parse fd");
2124		return -1;
2125	}
2126
2127	/* parse optional profiling duration */
2128	if (argc > 2 && is_prefix(argv[0], "duration")) {
2129		NEXT_ARG();
2130		duration = strtoul(*argv, &endptr, 0);
2131		if (*endptr)
2132			usage();
2133		NEXT_ARG();
2134	} else {
2135		duration = UINT_MAX;
2136	}
2137
2138	num_metric = profile_parse_metrics(argc, argv);
2139	if (num_metric <= 0)
2140		goto out;
2141
2142	num_cpu = libbpf_num_possible_cpus();
2143	if (num_cpu <= 0) {
2144		p_err("failed to identify number of CPUs");
2145		goto out;
2146	}
2147
2148	profile_obj = profiler_bpf__open();
2149	if (!profile_obj) {
2150		p_err("failed to open and/or load BPF object");
2151		goto out;
2152	}
2153
2154	profile_obj->rodata->num_cpu = num_cpu;
2155	profile_obj->rodata->num_metric = num_metric;
2156
2157	/* adjust map sizes */
2158	bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
2159	bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
2160	bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
2161	bpf_map__resize(profile_obj->maps.counts, 1);
2162
2163	/* resolve target program name */
2164	profile_tgt_name = profile_target_name(profile_tgt_fd);
2165	if (!profile_tgt_name)
2166		goto out;
2167
2168	bpf_object__for_each_program(prog, profile_obj->obj) {
2169		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2170						     profile_tgt_name);
2171		if (err) {
2172			p_err("failed to set attach target");
2173			goto out;
2174		}
2175	}
2176
2177	set_max_rlimit();
2178	err = profiler_bpf__load(profile_obj);
2179	if (err) {
2180		p_err("failed to load profile_obj");
2181		goto out;
2182	}
2183
2184	err = profile_open_perf_events(profile_obj);
2185	if (err)
2186		goto out;
2187
2188	err = profiler_bpf__attach(profile_obj);
2189	if (err) {
2190		p_err("failed to attach profile_obj");
2191		goto out;
2192	}
2193	signal(SIGINT, int_exit);
2194
2195	sleep(duration);
2196	profile_print_and_cleanup();
2197	return 0;
2198
2199out:
2200	profile_close_perf_events(profile_obj);
2201	if (profile_obj)
2202		profiler_bpf__destroy(profile_obj);
2203	close(profile_tgt_fd);
2204	free(profile_tgt_name);
2205	return err;
2206}
2207
2208#endif /* BPFTOOL_WITHOUT_SKELETONS */
2209
2210static int do_help(int argc, char **argv)
2211{
2212	if (json_output) {
2213		jsonw_null(json_wtr);
2214		return 0;
2215	}
2216
2217	fprintf(stderr,
2218		"Usage: %1$s %2$s { show | list } [PROG]\n"
2219		"       %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
2220		"       %1$s %2$s dump jited  PROG [{ file FILE | opcodes | linum }]\n"
2221		"       %1$s %2$s pin   PROG FILE\n"
2222		"       %1$s %2$s { load | loadall } OBJ  PATH \\\n"
2223		"                         [type TYPE] [dev NAME] \\\n"
2224		"                         [map { idx IDX | name NAME } MAP]\\\n"
2225		"                         [pinmaps MAP_DIR]\n"
 
2226		"       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2227		"       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2228		"       %1$s %2$s run PROG \\\n"
2229		"                         data_in FILE \\\n"
2230		"                         [data_out FILE [data_size_out L]] \\\n"
2231		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2232		"                         [repeat N]\n"
2233		"       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2234		"       %1$s %2$s tracelog\n"
2235		"       %1$s %2$s help\n"
2236		"\n"
2237		"       " HELP_SPEC_MAP "\n"
2238		"       " HELP_SPEC_PROGRAM "\n"
2239		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2240		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2241		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2242		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2243		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2244		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2245		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2246		"                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
2247		"                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
2248		"                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
 
2249		"                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
2250		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2251		"       ATTACH_TYPE := { msg_verdict | skb_verdict | stream_verdict |\n"
2252		"                        stream_parser | flow_dissector }\n"
2253		"       METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
2254		"       " HELP_SPEC_OPTIONS "\n"
2255		"",
2256		bin_name, argv[-2]);
2257
2258	return 0;
2259}
2260
2261static const struct cmd cmds[] = {
2262	{ "show",	do_show },
2263	{ "list",	do_show },
2264	{ "help",	do_help },
2265	{ "dump",	do_dump },
2266	{ "pin",	do_pin },
2267	{ "load",	do_load },
2268	{ "loadall",	do_loadall },
2269	{ "attach",	do_attach },
2270	{ "detach",	do_detach },
2271	{ "tracelog",	do_tracelog },
2272	{ "run",	do_run },
2273	{ "profile",	do_profile },
2274	{ 0 }
2275};
2276
2277int do_prog(int argc, char **argv)
2278{
2279	return cmd_select(cmds, argc, argv, do_help);
2280}
v6.9.4
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3
   4#ifndef _GNU_SOURCE
   5#define _GNU_SOURCE
   6#endif
   7#include <errno.h>
   8#include <fcntl.h>
   9#include <signal.h>
  10#include <stdarg.h>
  11#include <stdio.h>
  12#include <stdlib.h>
  13#include <string.h>
  14#include <time.h>
  15#include <unistd.h>
  16#include <net/if.h>
  17#include <sys/ioctl.h>
  18#include <sys/types.h>
  19#include <sys/stat.h>
  20#include <sys/syscall.h>
  21#include <dirent.h>
  22
  23#include <linux/err.h>
  24#include <linux/perf_event.h>
  25#include <linux/sizes.h>
  26
  27#include <bpf/bpf.h>
  28#include <bpf/btf.h>
  29#include <bpf/hashmap.h>
  30#include <bpf/libbpf.h>
  31#include <bpf/libbpf_internal.h>
  32#include <bpf/skel_internal.h>
  33
  34#include "cfg.h"
  35#include "main.h"
  36#include "xlated_dumper.h"
  37
  38#define BPF_METADATA_PREFIX "bpf_metadata_"
  39#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
  40
  41enum dump_mode {
  42	DUMP_JITED,
  43	DUMP_XLATED,
  44};
  45
  46static const bool attach_types[] = {
  47	[BPF_SK_SKB_STREAM_PARSER] = true,
  48	[BPF_SK_SKB_STREAM_VERDICT] = true,
  49	[BPF_SK_SKB_VERDICT] = true,
  50	[BPF_SK_MSG_VERDICT] = true,
  51	[BPF_FLOW_DISSECTOR] = true,
  52	[__MAX_BPF_ATTACH_TYPE] = false,
  53};
  54
  55/* Textual representations traditionally used by the program and kept around
  56 * for the sake of backwards compatibility.
  57 */
  58static const char * const attach_type_strings[] = {
  59	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
  60	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
  61	[BPF_SK_SKB_VERDICT] = "skb_verdict",
  62	[BPF_SK_MSG_VERDICT] = "msg_verdict",
 
  63	[__MAX_BPF_ATTACH_TYPE] = NULL,
  64};
  65
  66static struct hashmap *prog_table;
  67
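/* Resolve an attach type from either the canonical libbpf name (exact match)
 * or one of the legacy bpftool spellings above (prefix match).
 */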
  68static enum bpf_attach_type parse_attach_type(const char *str)
  69{
  70	enum bpf_attach_type type;
  71
  72	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
  73		if (attach_types[type]) {
  74			const char *attach_type_str;
  75
  76			attach_type_str = libbpf_bpf_attach_type_str(type);
  77			if (!strcmp(str, attach_type_str))
  78				return type;
  79		}
  80
  81		if (attach_type_strings[type] &&
  82		    is_prefix(str, attach_type_strings[type]))
  83			return type;
  84	}
  85
  86	return __MAX_BPF_ATTACH_TYPE;
  87}
  88
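/* Work out how much memory the variable-length members of bpf_prog_info need
 * for the requested dump (instructions, ksyms, func/line info), grow
 * *info_data accordingly and point every array field of *info into that
 * single buffer.
 */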
  89static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
  90			  void **info_data, size_t *const info_data_sz)
  91{
  92	struct bpf_prog_info holder = {};
  93	size_t needed = 0;
  94	void *ptr;
  95
  96	if (mode == DUMP_JITED) {
  97		holder.jited_prog_len = info->jited_prog_len;
  98		needed += info->jited_prog_len;
  99	} else {
 100		holder.xlated_prog_len = info->xlated_prog_len;
 101		needed += info->xlated_prog_len;
 102	}
 103
 104	holder.nr_jited_ksyms = info->nr_jited_ksyms;
 105	needed += info->nr_jited_ksyms * sizeof(__u64);
 106
 107	holder.nr_jited_func_lens = info->nr_jited_func_lens;
 108	needed += info->nr_jited_func_lens * sizeof(__u32);
 109
 110	holder.nr_func_info = info->nr_func_info;
 111	holder.func_info_rec_size = info->func_info_rec_size;
 112	needed += info->nr_func_info * info->func_info_rec_size;
 113
 114	holder.nr_line_info = info->nr_line_info;
 115	holder.line_info_rec_size = info->line_info_rec_size;
 116	needed += info->nr_line_info * info->line_info_rec_size;
 117
 118	holder.nr_jited_line_info = info->nr_jited_line_info;
 119	holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
 120	needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
 121
 122	if (needed > *info_data_sz) {
 123		ptr = realloc(*info_data, needed);
 124		if (!ptr)
 125			return -1;
 126
 127		*info_data = ptr;
 128		*info_data_sz = needed;
 129	}
 130	ptr = *info_data;
 131
 132	if (mode == DUMP_JITED) {
 133		holder.jited_prog_insns = ptr_to_u64(ptr);
 134		ptr += holder.jited_prog_len;
 135	} else {
 136		holder.xlated_prog_insns = ptr_to_u64(ptr);
 137		ptr += holder.xlated_prog_len;
 138	}
 139
 140	holder.jited_ksyms = ptr_to_u64(ptr);
 141	ptr += holder.nr_jited_ksyms * sizeof(__u64);
 142
 143	holder.jited_func_lens = ptr_to_u64(ptr);
 144	ptr += holder.nr_jited_func_lens * sizeof(__u32);
 145
 146	holder.func_info = ptr_to_u64(ptr);
 147	ptr += holder.nr_func_info * holder.func_info_rec_size;
 148
 149	holder.line_info = ptr_to_u64(ptr);
 150	ptr += holder.nr_line_info * holder.line_info_rec_size;
 151
 152	holder.jited_line_info = ptr_to_u64(ptr);
 153	ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
 154
 155	*info = holder;
 156	return 0;
 157}
 158
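/* Convert a load time expressed in nanoseconds since boot into a wall-clock
 * timestamp using CLOCK_REALTIME and CLOCK_BOOTTIME; fall back to printing
 * plain seconds if the clocks or localtime_r() fail.
 */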
 159static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
 160{
 161	struct timespec real_time_ts, boot_time_ts;
 162	time_t wallclock_secs;
 163	struct tm load_tm;
 164
 165	buf[--size] = '\0';
 166
 167	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
 168	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
 169		perror("Can't read clocks");
 170		snprintf(buf, size, "%llu", nsecs / 1000000000);
 171		return;
 172	}
 173
 174	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
 175		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
 176		1000000000;
 177
 178
 179	if (!localtime_r(&wallclock_secs, &load_tm)) {
 180		snprintf(buf, size, "%llu", nsecs / 1000000000);
 181		return;
 182	}
 183
 184	if (json_output)
 185		strftime(buf, size, "%s", &load_tm);
 186	else
 187		strftime(buf, size, "%FT%T%z", &load_tm);
 188}
 189
 190static void show_prog_maps(int fd, __u32 num_maps)
 191{
 192	struct bpf_prog_info info = {};
 193	__u32 len = sizeof(info);
 194	__u32 map_ids[num_maps];
 195	unsigned int i;
 196	int err;
 197
 198	info.nr_map_ids = num_maps;
 199	info.map_ids = ptr_to_u64(map_ids);
 200
 201	err = bpf_prog_get_info_by_fd(fd, &info, &len);
 202	if (err || !info.nr_map_ids)
 203		return;
 204
 205	if (json_output) {
 206		jsonw_name(json_wtr, "map_ids");
 207		jsonw_start_array(json_wtr);
 208		for (i = 0; i < info.nr_map_ids; i++)
 209			jsonw_uint(json_wtr, map_ids[i]);
 210		jsonw_end_array(json_wtr);
 211	} else {
 212		printf("  map_ids ");
 213		for (i = 0; i < info.nr_map_ids; i++)
 214			printf("%u%s", map_ids[i],
 215			       i == info.nr_map_ids - 1 ? "" : ",");
 216	}
 217}
 218
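/* Walk the maps referenced by the program looking for a single-entry
 * ".rodata" array with a BTF value type (where libbpf places skeleton
 * metadata) and return a freshly allocated copy of its value, or NULL.
 */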
 219static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
 220{
 221	struct bpf_prog_info prog_info;
 222	__u32 prog_info_len;
 223	__u32 map_info_len;
 224	void *value = NULL;
 225	__u32 *map_ids;
 226	int nr_maps;
 227	int key = 0;
 228	int map_fd;
 229	int ret;
 230	__u32 i;
 231
 232	memset(&prog_info, 0, sizeof(prog_info));
 233	prog_info_len = sizeof(prog_info);
 234	ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 235	if (ret)
 236		return NULL;
 237
 238	if (!prog_info.nr_map_ids)
 239		return NULL;
 240
 241	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
 242	if (!map_ids)
 243		return NULL;
 244
 245	nr_maps = prog_info.nr_map_ids;
 246	memset(&prog_info, 0, sizeof(prog_info));
 247	prog_info.nr_map_ids = nr_maps;
 248	prog_info.map_ids = ptr_to_u64(map_ids);
 249	prog_info_len = sizeof(prog_info);
 250
 251	ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 252	if (ret)
 253		goto free_map_ids;
 254
 255	for (i = 0; i < prog_info.nr_map_ids; i++) {
 256		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
 257		if (map_fd < 0)
 258			goto free_map_ids;
 259
 260		memset(map_info, 0, sizeof(*map_info));
 261		map_info_len = sizeof(*map_info);
 262		ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len);
 263		if (ret < 0) {
 264			close(map_fd);
 265			goto free_map_ids;
 266		}
 267
 268		if (map_info->type != BPF_MAP_TYPE_ARRAY ||
 269		    map_info->key_size != sizeof(int) ||
 270		    map_info->max_entries != 1 ||
 271		    !map_info->btf_value_type_id ||
 272		    !strstr(map_info->name, ".rodata")) {
 273			close(map_fd);
 274			continue;
 275		}
 276
 277		value = malloc(map_info->value_size);
 278		if (!value) {
 279			close(map_fd);
 280			goto free_map_ids;
 281		}
 282
 283		if (bpf_map_lookup_elem(map_fd, &key, value)) {
 284			close(map_fd);
 285			free(value);
 286			value = NULL;
 287			goto free_map_ids;
 288		}
 289
 290		close(map_fd);
 291		break;
 292	}
 293
 294free_map_ids:
 295	free(map_ids);
 296	return value;
 297}
 298
 299static bool has_metadata_prefix(const char *s)
 300{
 301	return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
 302}
 303
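/* Print every variable of the metadata map whose name carries the
 * "bpf_metadata_" prefix (stripped on output), rendered through the BTF
 * dumper in JSON or plain-text form.
 */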
 304static void show_prog_metadata(int fd, __u32 num_maps)
 305{
 306	const struct btf_type *t_datasec, *t_var;
 307	struct bpf_map_info map_info;
 308	struct btf_var_secinfo *vsi;
 309	bool printed_header = false;
 
 310	unsigned int i, vlen;
 311	void *value = NULL;
 312	const char *name;
 313	struct btf *btf;
 314	int err;
 315
 316	if (!num_maps)
 317		return;
 318
 319	memset(&map_info, 0, sizeof(map_info));
 320	value = find_metadata(fd, &map_info);
 321	if (!value)
 322		return;
 323
 324	btf = btf__load_from_kernel_by_id(map_info.btf_id);
 325	if (!btf)
 326		goto out_free;
 327
 328	t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
 329	if (!btf_is_datasec(t_datasec))
 330		goto out_free;
 331
 332	vlen = btf_vlen(t_datasec);
 333	vsi = btf_var_secinfos(t_datasec);
 334
 335	/* We don't proceed to check the kinds of the elements of the DATASEC.
 336	 * The verifier enforces them to be BTF_KIND_VAR.
 337	 */
 338
 339	if (json_output) {
 340		struct btf_dumper d = {
 341			.btf = btf,
 342			.jw = json_wtr,
 343			.is_plain_text = false,
 344		};
 345
 346		for (i = 0; i < vlen; i++, vsi++) {
 347			t_var = btf__type_by_id(btf, vsi->type);
 348			name = btf__name_by_offset(btf, t_var->name_off);
 349
 350			if (!has_metadata_prefix(name))
 351				continue;
 352
 353			if (!printed_header) {
 354				jsonw_name(json_wtr, "metadata");
 355				jsonw_start_object(json_wtr);
 356				printed_header = true;
 357			}
 358
 359			jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
 360			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 361			if (err) {
 362				p_err("btf dump failed: %d", err);
 363				break;
 364			}
 365		}
 366		if (printed_header)
 367			jsonw_end_object(json_wtr);
 368	} else {
 369		json_writer_t *btf_wtr;
 370		struct btf_dumper d = {
 371			.btf = btf,
 
 372			.is_plain_text = true,
 373		};
 374
 375		for (i = 0; i < vlen; i++, vsi++) {
 376			t_var = btf__type_by_id(btf, vsi->type);
 377			name = btf__name_by_offset(btf, t_var->name_off);
 378
 379			if (!has_metadata_prefix(name))
 380				continue;
 381
 382			if (!printed_header) {
 383				printf("\tmetadata:");
 384
 385				btf_wtr = jsonw_new(stdout);
 386				if (!btf_wtr) {
 387					p_err("jsonw alloc failed");
 388					goto out_free;
 389				}
  390				d.jw = btf_wtr;
 391
 392				printed_header = true;
 393			}
 394
 395			printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
 396
 397			jsonw_reset(btf_wtr);
 398			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 399			if (err) {
 400				p_err("btf dump failed: %d", err);
 401				break;
 402			}
 403		}
 404		if (printed_header)
 405			jsonw_destroy(&btf_wtr);
 406	}
 407
 408out_free:
 409	btf__free(btf);
 410	free(value);
 411}
 412
 413static void print_prog_header_json(struct bpf_prog_info *info, int fd)
 414{
 415	const char *prog_type_str;
 416	char prog_name[MAX_PROG_FULL_NAME];
 417
 418	jsonw_uint_field(json_wtr, "id", info->id);
 419	prog_type_str = libbpf_bpf_prog_type_str(info->type);
 420
 421	if (prog_type_str)
 422		jsonw_string_field(json_wtr, "type", prog_type_str);
 423	else
 424		jsonw_uint_field(json_wtr, "type", info->type);
 425
 426	if (*info->name) {
 427		get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
 428		jsonw_string_field(json_wtr, "name", prog_name);
 429	}
 430
 431	jsonw_name(json_wtr, "tag");
 432	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
 433		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
 434		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 435
 436	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
 437	if (info->run_time_ns) {
 438		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
 439		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
 440	}
 441	if (info->recursion_misses)
 442		jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
 443}
 444
 445static void print_prog_json(struct bpf_prog_info *info, int fd, bool orphaned)
 446{
 447	char *memlock;
 448
 449	jsonw_start_object(json_wtr);
 450	print_prog_header_json(info, fd);
 451	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
 452
 453	if (info->load_time) {
 454		char buf[32];
 455
 456		print_boot_time(info->load_time, buf, sizeof(buf));
 457
 458		/* Piggy back on load_time, since 0 uid is a valid one */
 459		jsonw_name(json_wtr, "loaded_at");
 460		jsonw_printf(json_wtr, "%s", buf);
 461		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
 462	}
 463
 464	jsonw_bool_field(json_wtr, "orphaned", orphaned);
 465	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
 466
 467	if (info->jited_prog_len) {
 468		jsonw_bool_field(json_wtr, "jited", true);
 469		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
 470	} else {
 471		jsonw_bool_field(json_wtr, "jited", false);
 472	}
 473
 474	memlock = get_fdinfo(fd, "memlock");
 475	if (memlock)
 476		jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
 477	free(memlock);
 478
 479	if (info->nr_map_ids)
 480		show_prog_maps(fd, info->nr_map_ids);
 481
 482	if (info->btf_id)
 483		jsonw_int_field(json_wtr, "btf_id", info->btf_id);
 484
 485	if (!hashmap__empty(prog_table)) {
 486		struct hashmap_entry *entry;
 487
 488		jsonw_name(json_wtr, "pinned");
 489		jsonw_start_array(json_wtr);
 490		hashmap__for_each_key_entry(prog_table, entry, info->id)
 491			jsonw_string(json_wtr, entry->pvalue);
 492		jsonw_end_array(json_wtr);
 493	}
 494
 495	emit_obj_refs_json(refs_table, info->id, json_wtr);
 496
 497	show_prog_metadata(fd, info->nr_map_ids);
 498
 499	jsonw_end_object(json_wtr);
 500}
 501
 502static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
 503{
 504	const char *prog_type_str;
 505	char prog_name[MAX_PROG_FULL_NAME];
 506
 507	printf("%u: ", info->id);
 508	prog_type_str = libbpf_bpf_prog_type_str(info->type);
 509	if (prog_type_str)
 510		printf("%s  ", prog_type_str);
 511	else
 512		printf("type %u  ", info->type);
 513
 514	if (*info->name) {
 515		get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
 516		printf("name %s  ", prog_name);
 517	}
 518
 519	printf("tag ");
 520	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
 521	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
 522	printf("%s", info->gpl_compatible ? "  gpl" : "");
 523	if (info->run_time_ns)
 524		printf(" run_time_ns %lld run_cnt %lld",
 525		       info->run_time_ns, info->run_cnt);
 526	if (info->recursion_misses)
 527		printf(" recursion_misses %lld", info->recursion_misses);
 528	printf("\n");
 529}
 530
 531static void print_prog_plain(struct bpf_prog_info *info, int fd, bool orphaned)
 532{
 533	char *memlock;
 534
 535	print_prog_header_plain(info, fd);
 536
 537	if (info->load_time) {
 538		char buf[32];
 539
 540		print_boot_time(info->load_time, buf, sizeof(buf));
 541
 542		/* Piggy back on load_time, since 0 uid is a valid one */
 543		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
 544	}
 545
 546	printf("\txlated %uB", info->xlated_prog_len);
 547
 548	if (info->jited_prog_len)
 549		printf("  jited %uB", info->jited_prog_len);
 550	else
 551		printf("  not jited");
 552
 553	memlock = get_fdinfo(fd, "memlock");
 554	if (memlock)
 555		printf("  memlock %sB", memlock);
 556	free(memlock);
 557
 558	if (orphaned)
 559		printf("  orphaned");
 560
 561	if (info->nr_map_ids)
 562		show_prog_maps(fd, info->nr_map_ids);
 563
 564	if (!hashmap__empty(prog_table)) {
 565		struct hashmap_entry *entry;
 566
 567		hashmap__for_each_key_entry(prog_table, entry, info->id)
 568			printf("\n\tpinned %s", (char *)entry->pvalue);
 569	}
 570
 571	if (info->btf_id)
 572		printf("\n\tbtf_id %d", info->btf_id);
 573
 574	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
 575
 576	printf("\n");
 577
 578	show_prog_metadata(fd, info->nr_map_ids);
 579}
 580
 581static int show_prog(int fd)
 582{
 583	struct bpf_prog_info info = {};
 584	__u32 len = sizeof(info);
 585	int err;
 586
 587	err = bpf_prog_get_info_by_fd(fd, &info, &len);
 588	if (err && err != -ENODEV) {
 589		p_err("can't get prog info: %s", strerror(errno));
 590		return -1;
 591	}
 592
 593	if (json_output)
 594		print_prog_json(&info, fd, err == -ENODEV);
 595	else
 596		print_prog_plain(&info, fd, err == -ENODEV);
 597
 598	return 0;
 599}
 600
 601static int do_show_subset(int argc, char **argv)
 602{
 603	int *fds = NULL;
 604	int nb_fds, i;
 605	int err = -1;
 606
 607	fds = malloc(sizeof(int));
 608	if (!fds) {
 609		p_err("mem alloc failed");
 610		return -1;
 611	}
 612	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 613	if (nb_fds < 1)
 614		goto exit_free;
 615
 616	if (json_output && nb_fds > 1)
 617		jsonw_start_array(json_wtr);	/* root array */
 618	for (i = 0; i < nb_fds; i++) {
 619		err = show_prog(fds[i]);
 620		if (err) {
 621			for (; i < nb_fds; i++)
 622				close(fds[i]);
 623			break;
 624		}
 625		close(fds[i]);
 626	}
 627	if (json_output && nb_fds > 1)
 628		jsonw_end_array(json_wtr);	/* root array */
 629
 630exit_free:
 631	free(fds);
 632	return err;
 633}
 634
 635static int do_show(int argc, char **argv)
 636{
 637	__u32 id = 0;
 638	int err;
 639	int fd;
 640
 641	if (show_pinned) {
 642		prog_table = hashmap__new(hash_fn_for_key_as_id,
 643					  equal_fn_for_key_as_id, NULL);
 644		if (IS_ERR(prog_table)) {
 645			p_err("failed to create hashmap for pinned paths");
 646			return -1;
 647		}
 648		build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
 649	}
 650	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
 651
 652	if (argc == 2)
 653		return do_show_subset(argc, argv);
 654
 655	if (argc)
 656		return BAD_ARG();
 657
 658	if (json_output)
 659		jsonw_start_array(json_wtr);
 660	while (true) {
 661		err = bpf_prog_get_next_id(id, &id);
 662		if (err) {
 663			if (errno == ENOENT) {
 664				err = 0;
 665				break;
 666			}
 667			p_err("can't get next program: %s%s", strerror(errno),
 668			      errno == EINVAL ? " -- kernel too old?" : "");
 669			err = -1;
 670			break;
 671		}
 672
 673		fd = bpf_prog_get_fd_by_id(id);
 674		if (fd < 0) {
 675			if (errno == ENOENT)
 676				continue;
 677			p_err("can't get prog by id (%u): %s",
 678			      id, strerror(errno));
 679			err = -1;
 680			break;
 681		}
 682
 683		err = show_prog(fd);
 684		close(fd);
 685		if (err)
 686			break;
 687	}
 688
 689	if (json_output)
 690		jsonw_end_array(json_wtr);
 691
 692	delete_obj_refs_table(refs_table);
 693
 694	if (show_pinned)
 695		delete_pinned_obj_table(prog_table);
 696
 697	return err;
 698}
 699
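/* Dump one program's translated or JITed instructions: write the raw bytes
 * to a file if requested, otherwise disassemble or pretty-print them,
 * splitting the output per sub-program when JITed function lengths are known.
 */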
 700static int
 701prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 702	  char *filepath, bool opcodes, bool visual, bool linum)
 703{
 704	struct bpf_prog_linfo *prog_linfo = NULL;
 705	const char *disasm_opt = NULL;
 706	struct dump_data dd = {};
 707	void *func_info = NULL;
 708	struct btf *btf = NULL;
 709	char func_sig[1024];
 710	unsigned char *buf;
 711	__u32 member_len;
 712	int fd, err = -1;
 713	ssize_t n;
 
 714
 715	if (mode == DUMP_JITED) {
 716		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
 717			p_info("no instructions returned");
 718			return -1;
 719		}
 720		buf = u64_to_ptr(info->jited_prog_insns);
 721		member_len = info->jited_prog_len;
 722	} else {	/* DUMP_XLATED */
 723		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 724			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 725			return -1;
 726		}
 727		buf = u64_to_ptr(info->xlated_prog_insns);
 728		member_len = info->xlated_prog_len;
 729	}
 730
 731	if (info->btf_id) {
 732		btf = btf__load_from_kernel_by_id(info->btf_id);
 733		if (!btf) {
 734			p_err("failed to get btf");
 735			return -1;
 736		}
 737	}
 738
 739	func_info = u64_to_ptr(info->func_info);
 740
 741	if (info->nr_line_info) {
 742		prog_linfo = bpf_prog_linfo__new(info);
 743		if (!prog_linfo)
 744			p_info("error in processing bpf_line_info.  continue without it.");
 745	}
 746
 747	if (filepath) {
 748		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 749		if (fd < 0) {
 750			p_err("can't open file %s: %s", filepath,
 751			      strerror(errno));
 752			goto exit_free;
 753		}
 754
 755		n = write(fd, buf, member_len);
 756		close(fd);
 757		if (n != (ssize_t)member_len) {
 758			p_err("error writing output file: %s",
 759			      n < 0 ? strerror(errno) : "short write");
 760			goto exit_free;
 761		}
 762
 763		if (json_output)
 764			jsonw_null(json_wtr);
 765	} else if (mode == DUMP_JITED) {
 766		const char *name = NULL;
 767
 768		if (info->ifindex) {
 769			name = ifindex_to_arch(info->ifindex, info->netns_dev,
 770					       info->netns_ino, &disasm_opt);
 771			if (!name)
 772				goto exit_free;
 773		}
 774
 775		if (info->nr_jited_func_lens && info->jited_func_lens) {
 776			struct kernel_sym *sym = NULL;
 777			struct bpf_func_info *record;
 778			char sym_name[SYM_MAX_NAME];
 779			unsigned char *img = buf;
 780			__u64 *ksyms = NULL;
 781			__u32 *lens;
 782			__u32 i;
 783			if (info->nr_jited_ksyms) {
 784				kernel_syms_load(&dd);
 785				ksyms = u64_to_ptr(info->jited_ksyms);
 786			}
 787
 788			if (json_output)
 789				jsonw_start_array(json_wtr);
 790
 791			lens = u64_to_ptr(info->jited_func_lens);
 792			for (i = 0; i < info->nr_jited_func_lens; i++) {
 793				if (ksyms) {
 794					sym = kernel_syms_search(&dd, ksyms[i]);
 795					if (sym)
 796						sprintf(sym_name, "%s", sym->name);
 797					else
 798						sprintf(sym_name, "0x%016llx", ksyms[i]);
 799				} else {
 800					strcpy(sym_name, "unknown");
 801				}
 802
 803				if (func_info) {
 804					record = func_info + i * info->func_info_rec_size;
 805					btf_dumper_type_only(btf, record->type_id,
 806							     func_sig,
 807							     sizeof(func_sig));
 808				}
 809
 810				if (json_output) {
 811					jsonw_start_object(json_wtr);
 812					if (func_info && func_sig[0] != '\0') {
 813						jsonw_name(json_wtr, "proto");
 814						jsonw_string(json_wtr, func_sig);
 815					}
 816					jsonw_name(json_wtr, "name");
 817					jsonw_string(json_wtr, sym_name);
 818					jsonw_name(json_wtr, "insns");
 819				} else {
 820					if (func_info && func_sig[0] != '\0')
 821						printf("%s:\n", func_sig);
 822					printf("%s:\n", sym_name);
 823				}
 824
 825				if (disasm_print_insn(img, lens[i], opcodes,
 826						      name, disasm_opt, btf,
 827						      prog_linfo, ksyms[i], i,
 828						      linum))
 829					goto exit_free;
 830
 831				img += lens[i];
 832
 833				if (json_output)
 834					jsonw_end_object(json_wtr);
 835				else
 836					printf("\n");
 837			}
 838
 839			if (json_output)
 840				jsonw_end_array(json_wtr);
 841		} else {
 842			if (disasm_print_insn(buf, member_len, opcodes, name,
 843					      disasm_opt, btf, NULL, 0, 0,
 844					      false))
 845				goto exit_free;
 846		}
 847	} else {
 848		kernel_syms_load(&dd);
 849		dd.nr_jited_ksyms = info->nr_jited_ksyms;
 850		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
 851		dd.btf = btf;
 852		dd.func_info = func_info;
 853		dd.finfo_rec_size = info->func_info_rec_size;
 854		dd.prog_linfo = prog_linfo;
 855
 856		if (json_output)
 857			dump_xlated_json(&dd, buf, member_len, opcodes, linum);
 858		else if (visual)
 859			dump_xlated_cfg(&dd, buf, member_len, opcodes, linum);
 860		else
 861			dump_xlated_plain(&dd, buf, member_len, opcodes, linum);
 
 862		kernel_syms_destroy(&dd);
 863	}
 864
 865	err = 0;
 866
 867exit_free:
 868	btf__free(btf);
 869	bpf_prog_linfo__free(prog_linfo);
 870	return err;
 871}
 872
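/* Implementation of "bpftool prog dump xlated|jited": parse the mode, the
 * matching program(s) and the output options, query bpf_prog_info twice
 * (first for sizes, then for the data) and hand each program to prog_dump().
 */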
 873static int do_dump(int argc, char **argv)
 874{
 875	struct bpf_prog_info info;
 876	__u32 info_len = sizeof(info);
 877	size_t info_data_sz = 0;
 878	void *info_data = NULL;
 879	char *filepath = NULL;
 880	bool opcodes = false;
 881	bool visual = false;
 882	enum dump_mode mode;
 883	bool linum = false;
 
 884	int nb_fds, i = 0;
 885	int *fds = NULL;
 886	int err = -1;
 
 887
 888	if (is_prefix(*argv, "jited")) {
 889		if (disasm_init())
 890			return -1;
 891		mode = DUMP_JITED;
 892	} else if (is_prefix(*argv, "xlated")) {
 893		mode = DUMP_XLATED;
 894	} else {
 895		p_err("expected 'xlated' or 'jited', got: %s", *argv);
 896		return -1;
 897	}
 898	NEXT_ARG();
 899
 900	if (argc < 2)
 901		usage();
 902
 903	fds = malloc(sizeof(int));
 904	if (!fds) {
 905		p_err("mem alloc failed");
 906		return -1;
 907	}
 908	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 909	if (nb_fds < 1)
 910		goto exit_free;
 911
 912	while (argc) {
 913		if (is_prefix(*argv, "file")) {
 914			NEXT_ARG();
 915			if (!argc) {
 916				p_err("expected file path");
 917				goto exit_close;
 918			}
 919			if (nb_fds > 1) {
 920				p_err("several programs matched");
 921				goto exit_close;
 922			}
 923
 924			filepath = *argv;
 925			NEXT_ARG();
 926		} else if (is_prefix(*argv, "opcodes")) {
 927			opcodes = true;
 928			NEXT_ARG();
 929		} else if (is_prefix(*argv, "visual")) {
 930			if (nb_fds > 1) {
 931				p_err("several programs matched");
 932				goto exit_close;
 933			}
 934
 935			visual = true;
 936			NEXT_ARG();
 937		} else if (is_prefix(*argv, "linum")) {
 938			linum = true;
 939			NEXT_ARG();
 940		} else {
 941			usage();
 942			goto exit_close;
 943		}
 944	}
 945
 946	if (filepath && (opcodes || visual || linum)) {
 947		p_err("'file' is not compatible with 'opcodes', 'visual', or 'linum'");
 948		goto exit_close;
 949	}
 950	if (json_output && visual) {
 951		p_err("'visual' is not compatible with JSON output");
 952		goto exit_close;
 953	}
 954
 955	if (json_output && nb_fds > 1)
 956		jsonw_start_array(json_wtr);	/* root array */
 957	for (i = 0; i < nb_fds; i++) {
 958		memset(&info, 0, sizeof(info));
 959
 960		err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
 961		if (err) {
 962			p_err("can't get prog info: %s", strerror(errno));
 963			break;
 964		}
 965
 966		err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
 967		if (err) {
 968			p_err("can't grow prog info_data");
 969			break;
 970		}
 971
 972		err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
 973		if (err) {
 974			p_err("can't get prog info: %s", strerror(errno));
 975			break;
 976		}
 977
 978		if (json_output && nb_fds > 1) {
 979			jsonw_start_object(json_wtr);	/* prog object */
 980			print_prog_header_json(&info, fds[i]);
 981			jsonw_name(json_wtr, "insns");
 982		} else if (nb_fds > 1) {
 983			print_prog_header_plain(&info, fds[i]);
 984		}
 985
 986		err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
 
 987
 988		if (json_output && nb_fds > 1)
 989			jsonw_end_object(json_wtr);	/* prog object */
 990		else if (i != nb_fds - 1 && nb_fds > 1)
 991			printf("\n");
 992
 
 993		if (err)
 994			break;
 995		close(fds[i]);
 996	}
 997	if (json_output && nb_fds > 1)
 998		jsonw_end_array(json_wtr);	/* root array */
 999
1000exit_close:
1001	for (; i < nb_fds; i++)
1002		close(fds[i]);
1003exit_free:
1004	free(info_data);
1005	free(fds);
1006	return err;
1007}
1008
1009static int do_pin(int argc, char **argv)
1010{
1011	int err;
1012
1013	err = do_pin_any(argc, argv, prog_parse_fd);
1014	if (!err && json_output)
1015		jsonw_null(json_wtr);
1016	return err;
1017}
1018
1019struct map_replace {
1020	int idx;
1021	int fd;
1022	char *name;
1023};
1024
1025static int map_replace_compar(const void *p1, const void *p2)
1026{
1027	const struct map_replace *a = p1, *b = p2;
1028
1029	return a->idx - b->idx;
1030}
1031
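/* Parse the PROG and ATTACH_TYPE arguments shared by the attach and detach
 * subcommands; all types except flow_dissector also expect a MAP argument
 * (for flow_dissector, *mapfd is simply set to 0).
 */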
1032static int parse_attach_detach_args(int argc, char **argv, int *progfd,
1033				    enum bpf_attach_type *attach_type,
1034				    int *mapfd)
1035{
1036	if (!REQ_ARGS(3))
1037		return -EINVAL;
1038
1039	*progfd = prog_parse_fd(&argc, &argv);
1040	if (*progfd < 0)
1041		return *progfd;
1042
1043	*attach_type = parse_attach_type(*argv);
1044	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
1045		p_err("invalid attach/detach type");
1046		return -EINVAL;
1047	}
1048
1049	if (*attach_type == BPF_FLOW_DISSECTOR) {
1050		*mapfd = 0;
1051		return 0;
1052	}
1053
1054	NEXT_ARG();
1055	if (!REQ_ARGS(2))
1056		return -EINVAL;
1057
1058	*mapfd = map_parse_fd(&argc, &argv);
1059	if (*mapfd < 0)
1060		return *mapfd;
1061
1062	return 0;
1063}
1064
1065static int do_attach(int argc, char **argv)
1066{
1067	enum bpf_attach_type attach_type;
1068	int err, progfd;
1069	int mapfd;
1070
1071	err = parse_attach_detach_args(argc, argv,
1072				       &progfd, &attach_type, &mapfd);
1073	if (err)
1074		return err;
1075
1076	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
1077	if (err) {
1078		p_err("failed prog attach to map");
1079		return -EINVAL;
1080	}
1081
1082	if (json_output)
1083		jsonw_null(json_wtr);
1084	return 0;
1085}
1086
1087static int do_detach(int argc, char **argv)
1088{
1089	enum bpf_attach_type attach_type;
1090	int err, progfd;
1091	int mapfd;
1092
1093	err = parse_attach_detach_args(argc, argv,
1094				       &progfd, &attach_type, &mapfd);
1095	if (err)
1096		return err;
1097
1098	err = bpf_prog_detach2(progfd, mapfd, attach_type);
1099	if (err) {
1100		p_err("failed prog detach from map");
1101		return -EINVAL;
1102	}
1103
1104	if (json_output)
1105		jsonw_null(json_wtr);
1106	return 0;
1107}
1108
1109static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1110{
1111	if (file_data_in && file_ctx_in &&
1112	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1113		p_err("cannot use standard input for both data_in and ctx_in");
1114		return -1;
1115	}
1116
1117	return 0;
1118}
1119
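/* Read the data_in/ctx_in payload from a file, or from stdin when fname is
 * "-", into a dynamically grown buffer capped at UINT32_MAX bytes.
 */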
1120static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1121{
1122	size_t block_size = 256;
1123	size_t buf_size = block_size;
1124	size_t nb_read = 0;
1125	void *tmp;
1126	FILE *f;
1127
1128	if (!fname) {
1129		*data_ptr = NULL;
1130		*size = 0;
1131		return 0;
1132	}
1133
1134	if (!strcmp(fname, "-"))
1135		f = stdin;
1136	else
1137		f = fopen(fname, "r");
1138	if (!f) {
1139		p_err("failed to open %s: %s", fname, strerror(errno));
1140		return -1;
1141	}
1142
1143	*data_ptr = malloc(block_size);
1144	if (!*data_ptr) {
1145		p_err("failed to allocate memory for data_in/ctx_in: %s",
1146		      strerror(errno));
1147		goto err_fclose;
1148	}
1149
1150	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1151		if (feof(f))
1152			break;
1153		if (ferror(f)) {
1154			p_err("failed to read data_in/ctx_in from %s: %s",
1155			      fname, strerror(errno));
1156			goto err_free;
1157		}
1158		if (nb_read > buf_size - block_size) {
1159			if (buf_size == UINT32_MAX) {
1160				p_err("data_in/ctx_in is too long (max: %d)",
1161				      UINT32_MAX);
1162				goto err_free;
1163			}
1164			/* No space for fread()-ing next chunk; realloc() */
1165			buf_size *= 2;
1166			tmp = realloc(*data_ptr, buf_size);
1167			if (!tmp) {
1168				p_err("failed to reallocate data_in/ctx_in: %s",
1169				      strerror(errno));
1170				goto err_free;
1171			}
1172			*data_ptr = tmp;
1173		}
1174	}
1175	if (f != stdin)
1176		fclose(f);
1177
1178	*size = nb_read;
1179	return 0;
1180
1181err_free:
1182	free(*data_ptr);
1183	*data_ptr = NULL;
1184err_fclose:
1185	if (f != stdin)
1186		fclose(f);
1187	return -1;
1188}
1189
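/* Print a classic hex dump: a row offset, sixteen bytes per line in hex,
 * followed by their printable ASCII representation.
 */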
1190static void hex_print(void *data, unsigned int size, FILE *f)
1191{
1192	size_t i, j;
1193	char c;
1194
1195	for (i = 0; i < size; i += 16) {
1196		/* Row offset */
1197		fprintf(f, "%07zx\t", i);
1198
1199		/* Hexadecimal values */
1200		for (j = i; j < i + 16 && j < size; j++)
1201			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1202				j % 2 ? " " : "");
1203		for (; j < i + 16; j++)
1204			fprintf(f, "  %s", j % 2 ? " " : "");
1205
1206		/* ASCII values (if relevant), '.' otherwise */
1207		fprintf(f, "| ");
1208		for (j = i; j < i + 16 && j < size; j++) {
1209			c = *(char *)(data + j);
1210			if (c < ' ' || c > '~')
1211				c = '.';
1212			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1213		}
1214
1215		fprintf(f, "\n");
1216	}
1217}
1218
1219static int
1220print_run_output(void *data, unsigned int size, const char *fname,
1221		 const char *json_key)
1222{
1223	size_t nb_written;
1224	FILE *f;
1225
1226	if (!fname)
1227		return 0;
1228
1229	if (!strcmp(fname, "-")) {
1230		f = stdout;
1231		if (json_output) {
1232			jsonw_name(json_wtr, json_key);
1233			print_data_json(data, size);
1234		} else {
1235			hex_print(data, size, f);
1236		}
1237		return 0;
1238	}
1239
1240	f = fopen(fname, "w");
1241	if (!f) {
1242		p_err("failed to open %s: %s", fname, strerror(errno));
1243		return -1;
1244	}
1245
1246	nb_written = fwrite(data, 1, size, f);
1247	fclose(f);
1248	if (nb_written != size) {
1249		p_err("failed to write output data/ctx: %s", strerror(errno));
1250		return -1;
1251	}
1252
1253	return 0;
1254}
1255
1256static int alloc_run_data(void **data_ptr, unsigned int size_out)
1257{
1258	*data_ptr = calloc(size_out, 1);
1259	if (!*data_ptr) {
1260		p_err("failed to allocate memory for output data/ctx: %s",
1261		      strerror(errno));
1262		return -1;
1263	}
1264
1265	return 0;
1266}
1267
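/* Implementation of "bpftool prog run": parse the data/ctx input and output
 * options, execute the program through bpf_prog_test_run_opts() and report
 * the return value, the duration and any output data or context.
 */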
1268static int do_run(int argc, char **argv)
1269{
1270	char *data_fname_in = NULL, *data_fname_out = NULL;
1271	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
 
1272	const unsigned int default_size = SZ_32K;
1273	void *data_in = NULL, *data_out = NULL;
1274	void *ctx_in = NULL, *ctx_out = NULL;
1275	unsigned int repeat = 1;
1276	int fd, err;
1277	LIBBPF_OPTS(bpf_test_run_opts, test_attr);
1278
1279	if (!REQ_ARGS(4))
1280		return -1;
1281
1282	fd = prog_parse_fd(&argc, &argv);
1283	if (fd < 0)
1284		return -1;
1285
1286	while (argc) {
1287		if (detect_common_prefix(*argv, "data_in", "data_out",
1288					 "data_size_out", NULL))
1289			return -1;
1290		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1291					 "ctx_size_out", NULL))
1292			return -1;
1293
1294		if (is_prefix(*argv, "data_in")) {
1295			NEXT_ARG();
1296			if (!REQ_ARGS(1))
1297				return -1;
1298
1299			data_fname_in = GET_ARG();
1300			if (check_single_stdin(data_fname_in, ctx_fname_in))
1301				return -1;
1302		} else if (is_prefix(*argv, "data_out")) {
1303			NEXT_ARG();
1304			if (!REQ_ARGS(1))
1305				return -1;
1306
1307			data_fname_out = GET_ARG();
1308		} else if (is_prefix(*argv, "data_size_out")) {
1309			char *endptr;
1310
1311			NEXT_ARG();
1312			if (!REQ_ARGS(1))
1313				return -1;
1314
1315			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1316			if (*endptr) {
1317				p_err("can't parse %s as output data size",
1318				      *argv);
1319				return -1;
1320			}
1321			NEXT_ARG();
1322		} else if (is_prefix(*argv, "ctx_in")) {
1323			NEXT_ARG();
1324			if (!REQ_ARGS(1))
1325				return -1;
1326
1327			ctx_fname_in = GET_ARG();
1328			if (check_single_stdin(data_fname_in, ctx_fname_in))
1329				return -1;
1330		} else if (is_prefix(*argv, "ctx_out")) {
1331			NEXT_ARG();
1332			if (!REQ_ARGS(1))
1333				return -1;
1334
1335			ctx_fname_out = GET_ARG();
1336		} else if (is_prefix(*argv, "ctx_size_out")) {
1337			char *endptr;
1338
1339			NEXT_ARG();
1340			if (!REQ_ARGS(1))
1341				return -1;
1342
1343			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1344			if (*endptr) {
1345				p_err("can't parse %s as output context size",
1346				      *argv);
1347				return -1;
1348			}
1349			NEXT_ARG();
1350		} else if (is_prefix(*argv, "repeat")) {
1351			char *endptr;
1352
1353			NEXT_ARG();
1354			if (!REQ_ARGS(1))
1355				return -1;
1356
1357			repeat = strtoul(*argv, &endptr, 0);
1358			if (*endptr) {
1359				p_err("can't parse %s as repeat number",
1360				      *argv);
1361				return -1;
1362			}
1363			NEXT_ARG();
1364		} else {
1365			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1366			      *argv);
1367			return -1;
1368		}
1369	}
1370
1371	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1372	if (err)
1373		return -1;
1374
1375	if (data_in) {
1376		if (!test_attr.data_size_out)
1377			test_attr.data_size_out = default_size;
1378		err = alloc_run_data(&data_out, test_attr.data_size_out);
1379		if (err)
1380			goto free_data_in;
1381	}
1382
1383	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1384	if (err)
1385		goto free_data_out;
1386
1387	if (ctx_in) {
1388		if (!test_attr.ctx_size_out)
1389			test_attr.ctx_size_out = default_size;
1390		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1391		if (err)
1392			goto free_ctx_in;
1393	}
1394
 
1395	test_attr.repeat	= repeat;
1396	test_attr.data_in	= data_in;
1397	test_attr.data_out	= data_out;
1398	test_attr.ctx_in	= ctx_in;
1399	test_attr.ctx_out	= ctx_out;
1400
1401	err = bpf_prog_test_run_opts(fd, &test_attr);
1402	if (err) {
1403		p_err("failed to run program: %s", strerror(errno));
1404		goto free_ctx_out;
1405	}
1406
1407	err = 0;
1408
1409	if (json_output)
1410		jsonw_start_object(json_wtr);	/* root */
1411
1412	/* Do not exit on errors occurring when printing output data/context,
1413	 * we still want to print return value and duration for program run.
1414	 */
1415	if (test_attr.data_size_out)
1416		err += print_run_output(test_attr.data_out,
1417					test_attr.data_size_out,
1418					data_fname_out, "data_out");
1419	if (test_attr.ctx_size_out)
1420		err += print_run_output(test_attr.ctx_out,
1421					test_attr.ctx_size_out,
1422					ctx_fname_out, "ctx_out");
1423
1424	if (json_output) {
1425		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1426		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1427		jsonw_end_object(json_wtr);	/* root */
1428	} else {
1429		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1430			test_attr.retval,
1431			repeat > 1 ? " (average)" : "", test_attr.duration);
1432	}
1433
1434free_ctx_out:
1435	free(ctx_out);
1436free_ctx_in:
1437	free(ctx_in);
1438free_data_out:
1439	free(data_out);
1440free_data_in:
1441	free(data_in);
1442
1443	return err;
1444}
1445
1446static int
1447get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1448		      enum bpf_attach_type *expected_attach_type)
1449{
1450	libbpf_print_fn_t print_backup;
1451	int ret;
1452
1453	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1454	if (!ret)
1455		return ret;
1456
1457	/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1458	print_backup = libbpf_set_print(print_all_levels);
1459	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1460	libbpf_set_print(print_backup);
1461
1462	return ret;
1463}
1464
1465static int
1466auto_attach_program(struct bpf_program *prog, const char *path)
1467{
1468	struct bpf_link *link;
1469	int err;
1470
1471	link = bpf_program__attach(prog);
1472	if (!link) {
1473		p_info("Program %s does not support autoattach, falling back to pinning",
1474		       bpf_program__name(prog));
1475		return bpf_obj_pin(bpf_program__fd(prog), path);
1476	}
1477
1478	err = bpf_link__pin(link, path);
1479	bpf_link__destroy(link);
1480	return err;
1481}
1482
1483static int
1484auto_attach_programs(struct bpf_object *obj, const char *path)
1485{
1486	struct bpf_program *prog;
1487	char buf[PATH_MAX];
1488	int err;
1489
1490	bpf_object__for_each_program(prog, obj) {
1491		err = pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog));
1492		if (err)
1493			goto err_unpin_programs;
1494
1495		err = auto_attach_program(prog, buf);
1496		if (err)
1497			goto err_unpin_programs;
1498	}
1499
1500	return 0;
1501
1502err_unpin_programs:
1503	while ((prog = bpf_object__prev_program(obj, prog))) {
1504		if (pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog)))
1505			continue;
1506
1507		bpf_program__unpin(prog, buf);
1508	}
1509
1510	return err;
1511}
1512
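/* Shared implementation of "prog load" and "prog loadall": open the object
 * file, apply the command-line overrides (program type, map replacements,
 * offload or xdpmeta device, pinmaps, autoattach), load it, and pin either
 * the first program only or all programs under the given path.
 */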
1513static int load_with_options(int argc, char **argv, bool first_prog_only)
1514{
1515	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1516	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1517		.relaxed_maps = relaxed_maps,
1518	);
 
1519	enum bpf_attach_type expected_attach_type;
1520	struct map_replace *map_replace = NULL;
1521	struct bpf_program *prog = NULL, *pos;
1522	unsigned int old_map_fds = 0;
1523	const char *pinmaps = NULL;
1524	__u32 xdpmeta_ifindex = 0;
1525	__u32 offload_ifindex = 0;
1526	bool auto_attach = false;
1527	struct bpf_object *obj;
1528	struct bpf_map *map;
1529	const char *pinfile;
1530	unsigned int i, j;
 
1531	const char *file;
1532	int idx, err;
1533
1534
1535	if (!REQ_ARGS(2))
1536		return -1;
1537	file = GET_ARG();
1538	pinfile = GET_ARG();
1539
1540	while (argc) {
1541		if (is_prefix(*argv, "type")) {
1542			NEXT_ARG();
1543
1544			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1545				p_err("program type already specified");
1546				goto err_free_reuse_maps;
1547			}
1548			if (!REQ_ARGS(1))
1549				goto err_free_reuse_maps;
1550
1551			err = libbpf_prog_type_by_name(*argv, &common_prog_type,
1552						       &expected_attach_type);
1553			if (err < 0) {
1554				/* Put a '/' at the end of type to appease libbpf */
1555				char *type = malloc(strlen(*argv) + 2);
1556
1557				if (!type) {
1558					p_err("mem alloc failed");
1559					goto err_free_reuse_maps;
1560				}
1561				*type = 0;
1562				strcat(type, *argv);
1563				strcat(type, "/");
1564
1565				err = get_prog_type_by_name(type, &common_prog_type,
1566							    &expected_attach_type);
1567				free(type);
1568				if (err < 0)
1569					goto err_free_reuse_maps;
1570			}
1571
1572			NEXT_ARG();
1573		} else if (is_prefix(*argv, "map")) {
1574			void *new_map_replace;
1575			char *endptr, *name;
1576			int fd;
1577
1578			NEXT_ARG();
1579
1580			if (!REQ_ARGS(4))
1581				goto err_free_reuse_maps;
1582
1583			if (is_prefix(*argv, "idx")) {
1584				NEXT_ARG();
1585
1586				idx = strtoul(*argv, &endptr, 0);
1587				if (*endptr) {
1588					p_err("can't parse %s as IDX", *argv);
1589					goto err_free_reuse_maps;
1590				}
1591				name = NULL;
1592			} else if (is_prefix(*argv, "name")) {
1593				NEXT_ARG();
1594
1595				name = *argv;
1596				idx = -1;
1597			} else {
1598				p_err("expected 'idx' or 'name', got: '%s'?",
1599				      *argv);
1600				goto err_free_reuse_maps;
1601			}
1602			NEXT_ARG();
1603
1604			fd = map_parse_fd(&argc, &argv);
1605			if (fd < 0)
1606				goto err_free_reuse_maps;
1607
1608			new_map_replace = libbpf_reallocarray(map_replace,
1609							      old_map_fds + 1,
1610							      sizeof(*map_replace));
1611			if (!new_map_replace) {
1612				p_err("mem alloc failed");
1613				goto err_free_reuse_maps;
1614			}
1615			map_replace = new_map_replace;
1616
1617			map_replace[old_map_fds].idx = idx;
1618			map_replace[old_map_fds].name = name;
1619			map_replace[old_map_fds].fd = fd;
1620			old_map_fds++;
1621		} else if (is_prefix(*argv, "dev")) {
1622			p_info("Warning: 'bpftool prog load [...] dev <ifname>' syntax is deprecated.\n"
1623			       "Going further, please use 'offload_dev <ifname>' to offload program to device.\n"
1624			       "For applications using XDP hints only, use 'xdpmeta_dev <ifname>'.");
1625			goto offload_dev;
1626		} else if (is_prefix(*argv, "offload_dev")) {
1627offload_dev:
1628			NEXT_ARG();
1629
1630			if (offload_ifindex) {
1631				p_err("offload_dev already specified");
1632				goto err_free_reuse_maps;
1633			} else if (xdpmeta_ifindex) {
1634				p_err("xdpmeta_dev and offload_dev are mutually exclusive");
1635				goto err_free_reuse_maps;
1636			}
1637			if (!REQ_ARGS(1))
1638				goto err_free_reuse_maps;
1639
1640			offload_ifindex = if_nametoindex(*argv);
1641			if (!offload_ifindex) {
1642				p_err("unrecognized netdevice '%s': %s",
1643				      *argv, strerror(errno));
1644				goto err_free_reuse_maps;
1645			}
1646			NEXT_ARG();
1647		} else if (is_prefix(*argv, "xdpmeta_dev")) {
1648			NEXT_ARG();
1649
1650			if (xdpmeta_ifindex) {
1651				p_err("xdpmeta_dev already specified");
1652				goto err_free_reuse_maps;
1653			} else if (offload_ifindex) {
1654				p_err("xdpmeta_dev and offload_dev are mutually exclusive");
1655				goto err_free_reuse_maps;
1656			}
1657			if (!REQ_ARGS(1))
1658				goto err_free_reuse_maps;
1659
1660			xdpmeta_ifindex = if_nametoindex(*argv);
1661			if (!xdpmeta_ifindex) {
1662				p_err("unrecognized netdevice '%s': %s",
1663				      *argv, strerror(errno));
1664				goto err_free_reuse_maps;
1665			}
1666			NEXT_ARG();
1667		} else if (is_prefix(*argv, "pinmaps")) {
1668			NEXT_ARG();
1669
1670			if (!REQ_ARGS(1))
1671				goto err_free_reuse_maps;
1672
1673			pinmaps = GET_ARG();
1674		} else if (is_prefix(*argv, "autoattach")) {
1675			auto_attach = true;
1676			NEXT_ARG();
1677		} else {
1678			p_err("expected no more arguments, 'type', 'map', 'offload_dev', 'xdpmeta_dev', 'pinmaps' or 'autoattach', got: '%s'?",
1679			      *argv);
1680			goto err_free_reuse_maps;
1681		}
1682	}
1683
1684	set_max_rlimit();
1685
1686	if (verifier_logs)
1687		/* log_level1 + log_level2 + stats, but not stable UAPI */
1688		open_opts.kernel_log_level = 1 + 2 + 4;
1689
1690	obj = bpf_object__open_file(file, &open_opts);
1691	if (!obj) {
1692		p_err("failed to open object file");
1693		goto err_free_reuse_maps;
1694	}
1695
1696	bpf_object__for_each_program(pos, obj) {
1697		enum bpf_prog_type prog_type = common_prog_type;
1698
1699		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1700			const char *sec_name = bpf_program__section_name(pos);
1701
1702			err = get_prog_type_by_name(sec_name, &prog_type,
1703						    &expected_attach_type);
1704			if (err < 0)
1705				goto err_close_obj;
1706		}
1707
1708		if (prog_type == BPF_PROG_TYPE_XDP && xdpmeta_ifindex) {
1709			bpf_program__set_flags(pos, BPF_F_XDP_DEV_BOUND_ONLY);
1710			bpf_program__set_ifindex(pos, xdpmeta_ifindex);
1711		} else {
1712			bpf_program__set_ifindex(pos, offload_ifindex);
1713		}
1714		if (bpf_program__type(pos) != prog_type)
1715			bpf_program__set_type(pos, prog_type);
1716		bpf_program__set_expected_attach_type(pos, expected_attach_type);
1717	}
1718
1719	qsort(map_replace, old_map_fds, sizeof(*map_replace),
1720	      map_replace_compar);
1721
1722	/* After the sort, maps specified by name come first in the list because
1723	 * they have idx == -1.  Resolve their names to indices.
1724	 */
1725	j = 0;
1726	while (j < old_map_fds && map_replace[j].name) {
1727		i = 0;
1728		bpf_object__for_each_map(map, obj) {
1729			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1730				map_replace[j].idx = i;
1731				break;
1732			}
1733			i++;
1734		}
1735		if (map_replace[j].idx == -1) {
1736			p_err("unable to find map '%s'", map_replace[j].name);
1737			goto err_close_obj;
1738		}
1739		j++;
1740	}
1741	/* Resort if any names were resolved */
1742	if (j)
1743		qsort(map_replace, old_map_fds, sizeof(*map_replace),
1744		      map_replace_compar);
1745
1746	/* Set ifindex and name reuse */
1747	j = 0;
1748	idx = 0;
1749	bpf_object__for_each_map(map, obj) {
1750		if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1751			bpf_map__set_ifindex(map, offload_ifindex);
1752
1753		if (j < old_map_fds && idx == map_replace[j].idx) {
1754			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1755			if (err) {
1756				p_err("unable to set up map reuse: %d", err);
1757				goto err_close_obj;
1758			}
1759
1760			/* Next reuse wants to apply to the same map */
1761			if (j < old_map_fds && map_replace[j].idx == idx) {
1762				p_err("replacement for map idx %d specified more than once",
1763				      idx);
1764				goto err_close_obj;
1765			}
1766		}
1767
1768		idx++;
1769	}
1770	if (j < old_map_fds) {
1771		p_err("map idx '%d' not used", map_replace[j].idx);
1772		goto err_close_obj;
1773	}
1774
1775	err = bpf_object__load(obj);
1776	if (err) {
1777		p_err("failed to load object file");
1778		goto err_close_obj;
1779	}
1780
1781	if (first_prog_only)
1782		err = mount_bpffs_for_file(pinfile);
1783	else
1784		err = create_and_mount_bpffs_dir(pinfile);
1785	if (err)
1786		goto err_close_obj;
1787
1788	if (first_prog_only) {
1789		prog = bpf_object__next_program(obj, NULL);
1790		if (!prog) {
1791			p_err("object file doesn't contain any bpf program");
1792			goto err_close_obj;
1793		}
1794
1795		if (auto_attach)
1796			err = auto_attach_program(prog, pinfile);
1797		else
1798			err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1799		if (err) {
1800			p_err("failed to pin program %s",
1801			      bpf_program__section_name(prog));
1802			goto err_close_obj;
1803		}
1804	} else {
1805		if (auto_attach)
1806			err = auto_attach_programs(obj, pinfile);
1807		else
1808			err = bpf_object__pin_programs(obj, pinfile);
1809		if (err) {
1810			p_err("failed to pin all programs");
1811			goto err_close_obj;
1812		}
1813	}
1814
1815	if (pinmaps) {
1816		err = bpf_object__pin_maps(obj, pinmaps);
1817		if (err) {
1818			p_err("failed to pin all maps");
1819			goto err_unpin;
1820		}
1821	}
1822
1823	if (json_output)
1824		jsonw_null(json_wtr);
1825
1826	bpf_object__close(obj);
1827	for (i = 0; i < old_map_fds; i++)
1828		close(map_replace[i].fd);
1829	free(map_replace);
1830
1831	return 0;
1832
1833err_unpin:
1834	if (first_prog_only)
1835		unlink(pinfile);
1836	else
1837		bpf_object__unpin_programs(obj, pinfile);
1838err_close_obj:
1839	bpf_object__close(obj);
1840err_free_reuse_maps:
1841	for (i = 0; i < old_map_fds; i++)
1842		close(map_replace[i].fd);
1843	free(map_replace);
1844	return -1;
1845}
1846
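/* Count the file descriptors currently open in this process; the counter
 * starts at -3 to discount ".", ".." and the descriptor used by opendir()
 * itself.
 */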
1847static int count_open_fds(void)
1848{
1849	DIR *dp = opendir("/proc/self/fd");
1850	struct dirent *de;
1851	int cnt = -3;
1852
1853	if (!dp)
1854		return -1;
1855
1856	while ((de = readdir(dp)))
1857		cnt++;
1858
1859	closedir(dp);
1860	return cnt;
1861}
1862
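/* Execute the loader program generated by bpf_object__gen_loader() via
 * bpf_load_and_run(), printing the verifier log on request and reporting any
 * file descriptors the loader leaked.
 */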
1863static int try_loader(struct gen_loader_opts *gen)
1864{
1865	struct bpf_load_and_run_opts opts = {};
1866	struct bpf_loader_ctx *ctx;
1867	int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
1868					     sizeof(struct bpf_prog_desc));
1869	int log_buf_sz = (1u << 24) - 1;
1870	int err, fds_before, fd_delta;
1871	char *log_buf = NULL;
1872
1873	ctx = alloca(ctx_sz);
1874	memset(ctx, 0, ctx_sz);
1875	ctx->sz = ctx_sz;
1876	if (verifier_logs) {
1877		ctx->log_level = 1 + 2 + 4;
1878		ctx->log_size = log_buf_sz;
1879		log_buf = malloc(log_buf_sz);
1880		if (!log_buf)
1881			return -ENOMEM;
1882		ctx->log_buf = (long) log_buf;
1883	}
1884	opts.ctx = ctx;
1885	opts.data = gen->data;
1886	opts.data_sz = gen->data_sz;
1887	opts.insns = gen->insns;
1888	opts.insns_sz = gen->insns_sz;
1889	fds_before = count_open_fds();
1890	err = bpf_load_and_run(&opts);
1891	fd_delta = count_open_fds() - fds_before;
1892	if (err < 0 || verifier_logs) {
1893		fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
1894		if (fd_delta && err < 0)
1895			fprintf(stderr, "loader prog leaked %d FDs\n",
1896				fd_delta);
1897	}
1898	free(log_buf);
1899	return err;
1900}
1901
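/* Handler for "prog load" with -L/--use-loader: open the object, let
 * bpf_object__gen_loader() make bpf_object__load() record loader
 * instructions and data into 'gen' instead of loading directly into the
 * kernel, then execute that generated loader via try_loader().
 */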
1902static int do_loader(int argc, char **argv)
1903{
1904	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
1905	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
1906	struct bpf_object *obj;
1907	const char *file;
1908	int err = -1;
1909
1910	if (!REQ_ARGS(1))
1911		return -1;
1912	file = GET_ARG();
1913
1914	if (verifier_logs)
1915		/* log_level1 + log_level2 + stats, but not stable UAPI */
1916		open_opts.kernel_log_level = 1 + 2 + 4;
1917
1918	obj = bpf_object__open_file(file, &open_opts);
1919	if (!obj) {
1920		p_err("failed to open object file");
1921		goto err_close_obj;
1922	}
1923
1924	err = bpf_object__gen_loader(obj, &gen);
1925	if (err)
1926		goto err_close_obj;
1927
1928	err = bpf_object__load(obj);
1929	if (err) {
1930		p_err("failed to load object file");
1931		goto err_close_obj;
1932	}
1933
1934	if (verifier_logs) {
1935		struct dump_data dd = {};
1936
1937		kernel_syms_load(&dd);
1938		dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
1939		kernel_syms_destroy(&dd);
1940	}
1941	err = try_loader(&gen);
1942err_close_obj:
1943	bpf_object__close(obj);
1944	return err;
1945}
1946
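/* "load" pins only the first program of the object at PATH (or goes
 * through the loader path above when -L/--use-loader is given), while
 * "loadall" pins every program of the object under the PATH directory.
 */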
1947static int do_load(int argc, char **argv)
1948{
1949	if (use_loader)
1950		return do_loader(argc, argv);
1951	return load_with_options(argc, argv, true);
1952}
1953
1954static int do_loadall(int argc, char **argv)
1955{
1956	return load_with_options(argc, argv, false);
1957}
1958
1959#ifdef BPFTOOL_WITHOUT_SKELETONS
1960
1961static int do_profile(int argc, char **argv)
1962{
1963	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1964	return 0;
1965}
1966
1967#else /* BPFTOOL_WITHOUT_SKELETONS */
1968
1969#include "profiler.skel.h"
1970
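/* Metrics selectable on the "profile" command line. Each entry holds the
 * perf_event_attr used to open one counter per CPU; when ratio_metric is
 * non-zero it names (1-based) the metric whose counter serves as the
 * denominator for the derived ratio printed alongside the raw value.
 */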
1971struct profile_metric {
1972	const char *name;
1973	struct bpf_perf_event_value val;
1974	struct perf_event_attr attr;
1975	bool selected;
1976
1977	/* calculate ratios like instructions per cycle */
1978	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1979	const char *ratio_desc;
1980	const float ratio_mul;
1981} metrics[] = {
1982	{
1983		.name = "cycles",
1984		.attr = {
1985			.type = PERF_TYPE_HARDWARE,
1986			.config = PERF_COUNT_HW_CPU_CYCLES,
1987			.exclude_user = 1,
1988		},
1989	},
1990	{
1991		.name = "instructions",
1992		.attr = {
1993			.type = PERF_TYPE_HARDWARE,
1994			.config = PERF_COUNT_HW_INSTRUCTIONS,
1995			.exclude_user = 1,
1996		},
1997		.ratio_metric = 1,
1998		.ratio_desc = "insns per cycle",
1999		.ratio_mul = 1.0,
2000	},
2001	{
2002		.name = "l1d_loads",
2003		.attr = {
2004			.type = PERF_TYPE_HW_CACHE,
2005			.config =
2006				PERF_COUNT_HW_CACHE_L1D |
2007				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2008				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
2009			.exclude_user = 1,
2010		},
2011	},
2012	{
2013		.name = "llc_misses",
2014		.attr = {
2015			.type = PERF_TYPE_HW_CACHE,
2016			.config =
2017				PERF_COUNT_HW_CACHE_LL |
2018				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2019				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2020			.exclude_user = 1
2021		},
2022		.ratio_metric = 2,
2023		.ratio_desc = "LLC misses per million insns",
2024		.ratio_mul = 1e6,
2025	},
2026	{
2027		.name = "itlb_misses",
2028		.attr = {
2029			.type = PERF_TYPE_HW_CACHE,
2030			.config =
2031				PERF_COUNT_HW_CACHE_ITLB |
2032				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2033				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2034			.exclude_user = 1
2035		},
2036		.ratio_metric = 2,
2037		.ratio_desc = "itlb misses per million insns",
2038		.ratio_mul = 1e6,
2039	},
2040	{
2041		.name = "dtlb_misses",
2042		.attr = {
2043			.type = PERF_TYPE_HW_CACHE,
2044			.config =
2045				PERF_COUNT_HW_CACHE_DTLB |
2046				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2047				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2048			.exclude_user = 1
2049		},
2050		.ratio_metric = 2,
2051		.ratio_desc = "dtlb misses per million insns",
2052		.ratio_mul = 1e6,
2053	},
2054};
2055
2056static __u64 profile_total_count;
2057
2058#define MAX_NUM_PROFILE_METRICS 4
2059
2060static int profile_parse_metrics(int argc, char **argv)
2061{
2062	unsigned int metric_cnt;
2063	int selected_cnt = 0;
2064	unsigned int i;
2065
2066	metric_cnt = ARRAY_SIZE(metrics);
2067
2068	while (argc > 0) {
2069		for (i = 0; i < metric_cnt; i++) {
2070			if (is_prefix(argv[0], metrics[i].name)) {
2071				if (!metrics[i].selected)
2072					selected_cnt++;
2073				metrics[i].selected = true;
2074				break;
2075			}
2076		}
2077		if (i == metric_cnt) {
2078			p_err("unknown metric %s", argv[0]);
2079			return -1;
2080		}
2081		NEXT_ARG();
2082	}
2083	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
2084		p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
2085		      selected_cnt, MAX_NUM_PROFILE_METRICS);
2086		return -1;
2087	}
2088	return selected_cnt;
2089}
2090
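/* Collect results from the profiler skeleton: sum the per-CPU run counts
 * into profile_total_count, then, for each selected metric, accumulate
 * the per-CPU counter/enabled/running values read from one row of the
 * accum_readings map.
 */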
2091static void profile_read_values(struct profiler_bpf *obj)
2092{
2093	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
2094	int reading_map_fd, count_map_fd;
2095	__u64 counts[num_cpu];
2096	__u32 key = 0;
2097	int err;
2098
2099	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
2100	count_map_fd = bpf_map__fd(obj->maps.counts);
2101	if (reading_map_fd < 0 || count_map_fd < 0) {
2102		p_err("failed to get fd for map");
2103		return;
2104	}
2105
2106	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
2107	if (err) {
2108		p_err("failed to read count_map: %s", strerror(errno));
2109		return;
2110	}
2111
2112	profile_total_count = 0;
2113	for (cpu = 0; cpu < num_cpu; cpu++)
2114		profile_total_count += counts[cpu];
2115
2116	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2117		struct bpf_perf_event_value values[num_cpu];
2118
2119		if (!metrics[m].selected)
2120			continue;
2121
2122		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
2123		if (err) {
2124			p_err("failed to read reading_map: %s",
2125			      strerror(errno));
2126			return;
2127		}
2128		for (cpu = 0; cpu < num_cpu; cpu++) {
2129			metrics[m].val.counter += values[cpu].counter;
2130			metrics[m].val.enabled += values[cpu].enabled;
2131			metrics[m].val.running += values[cpu].running;
2132		}
2133		key++;
2134	}
2135}
2136
2137static void profile_print_readings_json(void)
2138{
2139	__u32 m;
2140
2141	jsonw_start_array(json_wtr);
2142	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2143		if (!metrics[m].selected)
2144			continue;
2145		jsonw_start_object(json_wtr);
2146		jsonw_string_field(json_wtr, "metric", metrics[m].name);
2147		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
2148		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
2149		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
2150		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
2151
2152		jsonw_end_object(json_wtr);
2153	}
2154	jsonw_end_array(json_wtr);
2155}
2156
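/* Plain-text output: one line per selected metric with its raw counter,
 * an optional ratio against the metric referenced by ratio_metric, and,
 * when the event was multiplexed (enabled > running), the percentage of
 * time the counter was actually running.
 */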
2157static void profile_print_readings_plain(void)
2158{
2159	__u32 m;
2160
2161	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
2162	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2163		struct bpf_perf_event_value *val = &metrics[m].val;
2164		int r;
2165
2166		if (!metrics[m].selected)
2167			continue;
2168		printf("%18llu %-20s", val->counter, metrics[m].name);
2169
2170		r = metrics[m].ratio_metric - 1;
2171		if (r >= 0 && metrics[r].selected &&
2172		    metrics[r].val.counter > 0) {
2173			printf("# %8.2f %-30s",
2174			       val->counter * metrics[m].ratio_mul /
2175			       metrics[r].val.counter,
2176			       metrics[m].ratio_desc);
2177		} else {
2178			printf("%-41s", "");
2179		}
2180
2181		if (val->enabled > val->running)
2182			printf("(%4.2f%%)",
2183			       val->running * 100.0 / val->enabled);
2184		printf("\n");
2185	}
2186}
2187
2188static void profile_print_readings(void)
2189{
2190	if (json_output)
2191		profile_print_readings_json();
2192	else
2193		profile_print_readings_plain();
2194}
2195
2196static char *profile_target_name(int tgt_fd)
2197{
2198	struct bpf_func_info func_info;
2199	struct bpf_prog_info info = {};
2200	__u32 info_len = sizeof(info);
2201	const struct btf_type *t;
2202	__u32 func_info_rec_size;
2203	struct btf *btf = NULL;
2204	char *name = NULL;
2205	int err;
2206
2207	err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
2208	if (err) {
2209		p_err("failed to get info for prog FD %d", tgt_fd);
2210		goto out;
2211	}
2212
2213	if (info.btf_id == 0) {
2214		p_err("prog FD %d doesn't have valid btf", tgt_fd);
2215		goto out;
2216	}
2217
2218	func_info_rec_size = info.func_info_rec_size;
2219	if (info.nr_func_info == 0) {
2220		p_err("found 0 func_info for prog FD %d", tgt_fd);
2221		goto out;
2222	}
2223
2224	memset(&info, 0, sizeof(info));
2225	info.nr_func_info = 1;
2226	info.func_info_rec_size = func_info_rec_size;
2227	info.func_info = ptr_to_u64(&func_info);
2228
2229	err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
2230	if (err) {
2231		p_err("failed to get func_info for prog FD %d", tgt_fd);
2232		goto out;
2233	}
2234
2235	btf = btf__load_from_kernel_by_id(info.btf_id);
2236	if (!btf) {
2237		p_err("failed to load btf for prog FD %d", tgt_fd);
2238		goto out;
2239	}
2240
2241	t = btf__type_by_id(btf, func_info.type_id);
2242	if (!t) {
2243		p_err("btf %d doesn't have type %d",
2244		      info.btf_id, func_info.type_id);
2245		goto out;
2246	}
2247	name = strdup(btf__name_by_offset(btf, t->name_off));
2248out:
2249	btf__free(btf);
2250	return name;
2251}
2252
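/* Resolve the name of the target program's entry function from its BTF
 * func_info. The returned string (freed by the caller) is later passed
 * to bpf_program__set_attach_target() so the profiler's fentry/fexit
 * programs attach to that function.
 */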
2253static struct profiler_bpf *profile_obj;
2254static int profile_tgt_fd = -1;
2255static char *profile_tgt_name;
2256static int *profile_perf_events;
2257static int profile_perf_event_cnt;
2258
2259static void profile_close_perf_events(struct profiler_bpf *obj)
2260{
2261	int i;
2262
2263	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
2264		close(profile_perf_events[i]);
2265
2266	free(profile_perf_events);
2267	profile_perf_event_cnt = 0;
2268}
2269
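/* Open one hardware counter for metric 'mid' on 'cpu', store its fd in
 * the events perf-event-array map at the current slot, and enable it.
 * ENODEV means the CPU may be offline; that case is reported and the
 * slot is skipped rather than treated as a failure.
 */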
2270static int profile_open_perf_event(int mid, int cpu, int map_fd)
2271{
2272	int pmu_fd;
2273
2274	pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
2275			 -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
2276	if (pmu_fd < 0) {
2277		if (errno == ENODEV) {
2278			p_info("cpu %d may be offline, skipping %s profiling.",
2279				cpu, metrics[mid].name);
2280			profile_perf_event_cnt++;
2281			return 0;
2282		}
2283		return -1;
2284	}
2285
2286	if (bpf_map_update_elem(map_fd,
2287				&profile_perf_event_cnt,
2288				&pmu_fd, BPF_ANY) ||
2289	    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
2290		close(pmu_fd);
2291		return -1;
2292	}
2293
2294	profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
2295	return 0;
2296}
2297
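/* Allocate the fd bookkeeping array (num_metric * num_cpu entries) and
 * open one perf event per selected metric on every possible CPU.
 */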
2298static int profile_open_perf_events(struct profiler_bpf *obj)
2299{
2300	unsigned int cpu, m;
2301	int map_fd;
2302
2303	profile_perf_events = calloc(
2304		obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
2305	if (!profile_perf_events) {
2306		p_err("failed to allocate memory for perf_event array: %s",
2307		      strerror(errno));
2308		return -1;
2309	}
2310	map_fd = bpf_map__fd(obj->maps.events);
2311	if (map_fd < 0) {
2312		p_err("failed to get fd for events map");
2313		return -1;
2314	}
2315
2316	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2317		if (!metrics[m].selected)
2318			continue;
2319		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
2320			if (profile_open_perf_event(m, cpu, map_fd)) {
2321				p_err("failed to create event %s on cpu %d",
2322				      metrics[m].name, cpu);
2323				return -1;
2324			}
2325		}
2326	}
2327	return 0;
2328}
2329
2330static void profile_print_and_cleanup(void)
2331{
2332	profile_close_perf_events(profile_obj);
2333	profile_read_values(profile_obj);
2334	profile_print_readings();
2335	profiler_bpf__destroy(profile_obj);
2336
2337	close(profile_tgt_fd);
2338	free(profile_tgt_name);
2339}
2340
2341static void int_exit(int signo)
2342{
2343	profile_print_and_cleanup();
2344	exit(0);
2345}
2346
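/* Handler for "prog profile": parse the target program and metric list,
 * size the skeleton's maps for num_metric x num_cpu, point the fentry/
 * fexit programs at the target's entry function, load the skeleton, open
 * and enable the counters, attach, then sleep for the requested duration
 * (or until SIGINT) before printing the readings.
 * Illustrative invocation (program id is hypothetical):
 *   bpftool prog profile id 42 duration 10 cycles instructions
 */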
2347static int do_profile(int argc, char **argv)
2348{
2349	int num_metric, num_cpu, err = -1;
2350	struct bpf_program *prog;
2351	unsigned long duration;
2352	char *endptr;
2353
2354	/* we at least need two args for the prog and one metric */
2355	if (!REQ_ARGS(3))
2356		return -EINVAL;
2357
2358	/* parse target fd */
2359	profile_tgt_fd = prog_parse_fd(&argc, &argv);
2360	if (profile_tgt_fd < 0) {
2361		p_err("failed to parse fd");
2362		return -1;
2363	}
2364
2365	/* parse profiling optional duration */
2366	if (argc > 2 && is_prefix(argv[0], "duration")) {
2367		NEXT_ARG();
2368		duration = strtoul(*argv, &endptr, 0);
2369		if (*endptr)
2370			usage();
2371		NEXT_ARG();
2372	} else {
2373		duration = UINT_MAX;
2374	}
2375
2376	num_metric = profile_parse_metrics(argc, argv);
2377	if (num_metric <= 0)
2378		goto out;
2379
2380	num_cpu = libbpf_num_possible_cpus();
2381	if (num_cpu <= 0) {
2382		p_err("failed to identify number of CPUs");
2383		goto out;
2384	}
2385
2386	profile_obj = profiler_bpf__open();
2387	if (!profile_obj) {
2388		p_err("failed to open and/or load BPF object");
2389		goto out;
2390	}
2391
2392	profile_obj->rodata->num_cpu = num_cpu;
2393	profile_obj->rodata->num_metric = num_metric;
2394
2395	/* adjust map sizes */
2396	bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
2397	bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
2398	bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
2399	bpf_map__set_max_entries(profile_obj->maps.counts, 1);
2400
2401	/* change target name */
2402	profile_tgt_name = profile_target_name(profile_tgt_fd);
2403	if (!profile_tgt_name)
2404		goto out;
2405
2406	bpf_object__for_each_program(prog, profile_obj->obj) {
2407		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2408						     profile_tgt_name);
2409		if (err) {
2410			p_err("failed to set attach target");
2411			goto out;
2412		}
2413	}
2414
2415	set_max_rlimit();
2416	err = profiler_bpf__load(profile_obj);
2417	if (err) {
2418		p_err("failed to load profile_obj");
2419		goto out;
2420	}
2421
2422	err = profile_open_perf_events(profile_obj);
2423	if (err)
2424		goto out;
2425
2426	err = profiler_bpf__attach(profile_obj);
2427	if (err) {
2428		p_err("failed to attach profile_obj");
2429		goto out;
2430	}
2431	signal(SIGINT, int_exit);
2432
2433	sleep(duration);
2434	profile_print_and_cleanup();
2435	return 0;
2436
2437out:
2438	profile_close_perf_events(profile_obj);
2439	if (profile_obj)
2440		profiler_bpf__destroy(profile_obj);
2441	close(profile_tgt_fd);
2442	free(profile_tgt_name);
2443	return err;
2444}
2445
2446#endif /* BPFTOOL_WITHOUT_SKELETONS */
2447
2448static int do_help(int argc, char **argv)
2449{
2450	if (json_output) {
2451		jsonw_null(json_wtr);
2452		return 0;
2453	}
2454
2455	fprintf(stderr,
2456		"Usage: %1$s %2$s { show | list } [PROG]\n"
2457		"       %1$s %2$s dump xlated PROG [{ file FILE | [opcodes] [linum] [visual] }]\n"
2458		"       %1$s %2$s dump jited  PROG [{ file FILE | [opcodes] [linum] }]\n"
2459		"       %1$s %2$s pin   PROG FILE\n"
2460		"       %1$s %2$s { load | loadall } OBJ  PATH \\\n"
2461		"                         [type TYPE] [{ offload_dev | xdpmeta_dev } NAME] \\\n"
2462		"                         [map { idx IDX | name NAME } MAP]\\\n"
2463		"                         [pinmaps MAP_DIR]\n"
2464		"                         [autoattach]\n"
2465		"       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2466		"       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2467		"       %1$s %2$s run PROG \\\n"
2468		"                         data_in FILE \\\n"
2469		"                         [data_out FILE [data_size_out L]] \\\n"
2470		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2471		"                         [repeat N]\n"
2472		"       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2473		"       %1$s %2$s tracelog\n"
2474		"       %1$s %2$s help\n"
2475		"\n"
2476		"       " HELP_SPEC_MAP "\n"
2477		"       " HELP_SPEC_PROGRAM "\n"
2478		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2479		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2480		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2481		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2482		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2483		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2484		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2485		"                 cgroup/connect_unix | cgroup/getpeername4 | cgroup/getpeername6 |\n"
2486		"                 cgroup/getpeername_unix | cgroup/getsockname4 | cgroup/getsockname6 |\n"
2487		"                 cgroup/getsockname_unix | cgroup/sendmsg4 | cgroup/sendmsg6 |\n"
2488		"                 cgroup/sendmsg_unix | cgroup/recvmsg4 | cgroup/recvmsg6 | cgroup/recvmsg_unix |\n"
2489		"                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
2490		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2491		"       ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
2492		"                        sk_skb_stream_parser | flow_dissector }\n"
2493		"       METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
2494		"       " HELP_SPEC_OPTIONS " |\n"
2495		"                    {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
2496		"                    {-L|--use-loader} }\n"
2497		"",
2498		bin_name, argv[-2]);
2499
2500	return 0;
2501}
2502
2503static const struct cmd cmds[] = {
2504	{ "show",	do_show },
2505	{ "list",	do_show },
2506	{ "help",	do_help },
2507	{ "dump",	do_dump },
2508	{ "pin",	do_pin },
2509	{ "load",	do_load },
2510	{ "loadall",	do_loadall },
2511	{ "attach",	do_attach },
2512	{ "detach",	do_detach },
2513	{ "tracelog",	do_tracelog },
2514	{ "run",	do_run },
2515	{ "profile",	do_profile },
2516	{ 0 }
2517};
2518
2519int do_prog(int argc, char **argv)
2520{
2521	return cmd_select(cmds, argc, argv, do_help);
2522}