   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3
   4#define _GNU_SOURCE
   5#include <errno.h>
   6#include <fcntl.h>
   7#include <signal.h>
   8#include <stdarg.h>
   9#include <stdio.h>
  10#include <stdlib.h>
  11#include <string.h>
  12#include <time.h>
  13#include <unistd.h>
  14#include <net/if.h>
  15#include <sys/ioctl.h>
  16#include <sys/types.h>
  17#include <sys/stat.h>
  18#include <sys/syscall.h>
  19#include <dirent.h>
  20
  21#include <linux/err.h>
  22#include <linux/perf_event.h>
  23#include <linux/sizes.h>
  24
  25#include <bpf/bpf.h>
  26#include <bpf/btf.h>
  27#include <bpf/libbpf.h>
  28#include <bpf/bpf_gen_internal.h>
  29#include <bpf/skel_internal.h>
  30
  31#include "cfg.h"
  32#include "main.h"
  33#include "xlated_dumper.h"
  34
  35#define BPF_METADATA_PREFIX "bpf_metadata_"
  36#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
  37
  38const char * const prog_type_name[] = {
  39	[BPF_PROG_TYPE_UNSPEC]			= "unspec",
  40	[BPF_PROG_TYPE_SOCKET_FILTER]		= "socket_filter",
  41	[BPF_PROG_TYPE_KPROBE]			= "kprobe",
  42	[BPF_PROG_TYPE_SCHED_CLS]		= "sched_cls",
  43	[BPF_PROG_TYPE_SCHED_ACT]		= "sched_act",
  44	[BPF_PROG_TYPE_TRACEPOINT]		= "tracepoint",
  45	[BPF_PROG_TYPE_XDP]			= "xdp",
  46	[BPF_PROG_TYPE_PERF_EVENT]		= "perf_event",
  47	[BPF_PROG_TYPE_CGROUP_SKB]		= "cgroup_skb",
  48	[BPF_PROG_TYPE_CGROUP_SOCK]		= "cgroup_sock",
  49	[BPF_PROG_TYPE_LWT_IN]			= "lwt_in",
  50	[BPF_PROG_TYPE_LWT_OUT]			= "lwt_out",
  51	[BPF_PROG_TYPE_LWT_XMIT]		= "lwt_xmit",
  52	[BPF_PROG_TYPE_SOCK_OPS]		= "sock_ops",
  53	[BPF_PROG_TYPE_SK_SKB]			= "sk_skb",
  54	[BPF_PROG_TYPE_CGROUP_DEVICE]		= "cgroup_device",
  55	[BPF_PROG_TYPE_SK_MSG]			= "sk_msg",
  56	[BPF_PROG_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
  57	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR]	= "cgroup_sock_addr",
  58	[BPF_PROG_TYPE_LWT_SEG6LOCAL]		= "lwt_seg6local",
  59	[BPF_PROG_TYPE_LIRC_MODE2]		= "lirc_mode2",
  60	[BPF_PROG_TYPE_SK_REUSEPORT]		= "sk_reuseport",
  61	[BPF_PROG_TYPE_FLOW_DISSECTOR]		= "flow_dissector",
  62	[BPF_PROG_TYPE_CGROUP_SYSCTL]		= "cgroup_sysctl",
  63	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE]	= "raw_tracepoint_writable",
  64	[BPF_PROG_TYPE_CGROUP_SOCKOPT]		= "cgroup_sockopt",
  65	[BPF_PROG_TYPE_TRACING]			= "tracing",
  66	[BPF_PROG_TYPE_STRUCT_OPS]		= "struct_ops",
  67	[BPF_PROG_TYPE_EXT]			= "ext",
  68	[BPF_PROG_TYPE_LSM]			= "lsm",
  69	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
  70};
  71
  72const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
  73
  74enum dump_mode {
  75	DUMP_JITED,
  76	DUMP_XLATED,
  77};
  78
  79static const char * const attach_type_strings[] = {
  80	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
  81	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
  82	[BPF_SK_SKB_VERDICT] = "skb_verdict",
  83	[BPF_SK_MSG_VERDICT] = "msg_verdict",
  84	[BPF_FLOW_DISSECTOR] = "flow_dissector",
  85	[__MAX_BPF_ATTACH_TYPE] = NULL,
  86};
  87
  88static enum bpf_attach_type parse_attach_type(const char *str)
  89{
  90	enum bpf_attach_type type;
  91
  92	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
  93		if (attach_type_strings[type] &&
  94		    is_prefix(str, attach_type_strings[type]))
  95			return type;
  96	}
  97
  98	return __MAX_BPF_ATTACH_TYPE;
  99}
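/* Note: parse_attach_type() matches via is_prefix(), so the attach type
 * may be abbreviated on the command line to any prefix of the strings
 * above (the first matching table entry wins). Unknown names fall
 * through to __MAX_BPF_ATTACH_TYPE, which callers treat as an error.
 */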
 100
 101static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
 102{
 103	struct timespec real_time_ts, boot_time_ts;
 104	time_t wallclock_secs;
 105	struct tm load_tm;
 106
 107	buf[--size] = '\0';
 108
 109	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
 110	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
 111		perror("Can't read clocks");
 112		snprintf(buf, size, "%llu", nsecs / 1000000000);
 113		return;
 114	}
 115
 116	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
 117		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
 118		1000000000;
 119
 120
 121	if (!localtime_r(&wallclock_secs, &load_tm)) {
 122		snprintf(buf, size, "%llu", nsecs / 1000000000);
 123		return;
 124	}
 125
 126	if (json_output)
 127		strftime(buf, size, "%s", &load_tm);
 128	else
 129		strftime(buf, size, "%FT%T%z", &load_tm);
 130}
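/* Worked example for print_boot_time(): with CLOCK_REALTIME at
 * 1000000 s, CLOCK_BOOTTIME at 400 s and a load_time of
 * 100000000000 ns (100 s after boot, nanosecond remainders ignored):
 *
 *   wallclock_secs = (1000000 - 400) + 100 = 999700
 *
 * That value is then formatted with strftime(): "%s" (epoch seconds)
 * for JSON output, "%FT%T%z" otherwise.
 */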
 131
 132static void show_prog_maps(int fd, __u32 num_maps)
 133{
 134	struct bpf_prog_info info = {};
 135	__u32 len = sizeof(info);
 136	__u32 map_ids[num_maps];
 137	unsigned int i;
 138	int err;
 139
 140	info.nr_map_ids = num_maps;
 141	info.map_ids = ptr_to_u64(map_ids);
 142
 143	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 144	if (err || !info.nr_map_ids)
 145		return;
 146
 147	if (json_output) {
 148		jsonw_name(json_wtr, "map_ids");
 149		jsonw_start_array(json_wtr);
 150		for (i = 0; i < info.nr_map_ids; i++)
 151			jsonw_uint(json_wtr, map_ids[i]);
 152		jsonw_end_array(json_wtr);
 153	} else {
 154		printf("  map_ids ");
 155		for (i = 0; i < info.nr_map_ids; i++)
 156			printf("%u%s", map_ids[i],
 157			       i == info.nr_map_ids - 1 ? "" : ",");
 158	}
 159}
 160
 161static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
 162{
 163	struct bpf_prog_info prog_info;
 164	__u32 prog_info_len;
 165	__u32 map_info_len;
 166	void *value = NULL;
 167	__u32 *map_ids;
 168	int nr_maps;
 169	int key = 0;
 170	int map_fd;
 171	int ret;
 172	__u32 i;
 173
 174	memset(&prog_info, 0, sizeof(prog_info));
 175	prog_info_len = sizeof(prog_info);
 176	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 177	if (ret)
 178		return NULL;
 179
 180	if (!prog_info.nr_map_ids)
 181		return NULL;
 182
 183	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
 184	if (!map_ids)
 185		return NULL;
 186
 187	nr_maps = prog_info.nr_map_ids;
 188	memset(&prog_info, 0, sizeof(prog_info));
 189	prog_info.nr_map_ids = nr_maps;
 190	prog_info.map_ids = ptr_to_u64(map_ids);
 191	prog_info_len = sizeof(prog_info);
 192
 193	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 194	if (ret)
 195		goto free_map_ids;
 196
 197	for (i = 0; i < prog_info.nr_map_ids; i++) {
 198		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
 199		if (map_fd < 0)
 200			goto free_map_ids;
 201
 202		memset(map_info, 0, sizeof(*map_info));
 203		map_info_len = sizeof(*map_info);
 204		ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
 205		if (ret < 0) {
 206			close(map_fd);
 207			goto free_map_ids;
 208		}
 209
 210		if (map_info->type != BPF_MAP_TYPE_ARRAY ||
 211		    map_info->key_size != sizeof(int) ||
 212		    map_info->max_entries != 1 ||
 213		    !map_info->btf_value_type_id ||
 214		    !strstr(map_info->name, ".rodata")) {
 215			close(map_fd);
 216			continue;
 217		}
 218
 219		value = malloc(map_info->value_size);
 220		if (!value) {
 221			close(map_fd);
 222			goto free_map_ids;
 223		}
 224
 225		if (bpf_map_lookup_elem(map_fd, &key, value)) {
 226			close(map_fd);
 227			free(value);
 228			value = NULL;
 229			goto free_map_ids;
 230		}
 231
 232		close(map_fd);
 233		break;
 234	}
 235
 236free_map_ids:
 237	free(map_ids);
 238	return value;
 239}
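/* find_metadata() looks for the program's .rodata map: an ARRAY map with
 * int keys, a single entry and a BTF value type. A sketch of how such a
 * map typically comes into existence on the BPF program side (the
 * variable name here is illustrative, not taken from this file):
 *
 *   volatile const char bpf_metadata_version[] SEC(".rodata") = "1.2.3";
 *
 * libbpf places the constant into the object's .rodata map, and
 * show_prog_metadata() below prints every DATASEC variable whose name
 * carries the "bpf_metadata_" prefix.
 */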
 240
 241static bool has_metadata_prefix(const char *s)
 242{
 243	return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
 244}
 245
 246static void show_prog_metadata(int fd, __u32 num_maps)
 247{
 248	const struct btf_type *t_datasec, *t_var;
 249	struct bpf_map_info map_info;
 250	struct btf_var_secinfo *vsi;
 251	bool printed_header = false;
 252	struct btf *btf = NULL;
 253	unsigned int i, vlen;
 254	void *value = NULL;
 255	const char *name;
 256	int err;
 257
 258	if (!num_maps)
 259		return;
 260
 261	memset(&map_info, 0, sizeof(map_info));
 262	value = find_metadata(fd, &map_info);
 263	if (!value)
 264		return;
 265
 266	err = btf__get_from_id(map_info.btf_id, &btf);
 267	if (err || !btf)
 268		goto out_free;
 269
 270	t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
 271	if (!btf_is_datasec(t_datasec))
 272		goto out_free;
 273
 274	vlen = btf_vlen(t_datasec);
 275	vsi = btf_var_secinfos(t_datasec);
 276
 277	/* We don't proceed to check the kinds of the elements of the DATASEC.
 278	 * The verifier enforces them to be BTF_KIND_VAR.
 279	 */
 280
 281	if (json_output) {
 282		struct btf_dumper d = {
 283			.btf = btf,
 284			.jw = json_wtr,
 285			.is_plain_text = false,
 286		};
 287
 288		for (i = 0; i < vlen; i++, vsi++) {
 289			t_var = btf__type_by_id(btf, vsi->type);
 290			name = btf__name_by_offset(btf, t_var->name_off);
 291
 292			if (!has_metadata_prefix(name))
 293				continue;
 294
 295			if (!printed_header) {
 296				jsonw_name(json_wtr, "metadata");
 297				jsonw_start_object(json_wtr);
 298				printed_header = true;
 299			}
 300
 301			jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
 302			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 303			if (err) {
 304				p_err("btf dump failed: %d", err);
 305				break;
 306			}
 307		}
 308		if (printed_header)
 309			jsonw_end_object(json_wtr);
 310	} else {
 311		json_writer_t *btf_wtr = jsonw_new(stdout);
 312		struct btf_dumper d = {
 313			.btf = btf,
 314			.jw = btf_wtr,
 315			.is_plain_text = true,
 316		};
 317
 318		if (!btf_wtr) {
 319			p_err("jsonw alloc failed");
 320			goto out_free;
 321		}
 322
 323		for (i = 0; i < vlen; i++, vsi++) {
 324			t_var = btf__type_by_id(btf, vsi->type);
 325			name = btf__name_by_offset(btf, t_var->name_off);
 326
 327			if (!has_metadata_prefix(name))
 328				continue;
 329
 330			if (!printed_header) {
 331				printf("\tmetadata:");
 332				printed_header = true;
 333			}
 334
 335			printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
 336
 337			jsonw_reset(btf_wtr);
 338			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 339			if (err) {
 340				p_err("btf dump failed: %d", err);
 341				break;
 342			}
 343		}
 344		if (printed_header)
 345			jsonw_destroy(&btf_wtr);
 346	}
 347
 348out_free:
 349	btf__free(btf);
 350	free(value);
 351}
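/* With a variable like the illustrative bpf_metadata_version above,
 * plain output from show_prog_metadata() would look roughly like:
 *
 *	metadata:
 *		version = "1.2.3"
 *
 * The "bpf_metadata_" prefix is stripped and the value is rendered by
 * btf_dumper_type() according to its BTF type.
 */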
 352
 353static void print_prog_header_json(struct bpf_prog_info *info)
 354{
 355	jsonw_uint_field(json_wtr, "id", info->id);
 356	if (info->type < ARRAY_SIZE(prog_type_name))
 357		jsonw_string_field(json_wtr, "type",
 358				   prog_type_name[info->type]);
 359	else
 360		jsonw_uint_field(json_wtr, "type", info->type);
 361
 362	if (*info->name)
 363		jsonw_string_field(json_wtr, "name", info->name);
 364
 365	jsonw_name(json_wtr, "tag");
 366	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
 367		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
 368		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 369
 370	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
 371	if (info->run_time_ns) {
 372		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
 373		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
 374	}
 375	if (info->recursion_misses)
 376		jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
 377}
 378
 379static void print_prog_json(struct bpf_prog_info *info, int fd)
 380{
 381	char *memlock;
 382
 383	jsonw_start_object(json_wtr);
 384	print_prog_header_json(info);
 385	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
 386
 387	if (info->load_time) {
 388		char buf[32];
 389
 390		print_boot_time(info->load_time, buf, sizeof(buf));
 391
 392		/* Piggy back on load_time, since 0 uid is a valid one */
 393		jsonw_name(json_wtr, "loaded_at");
 394		jsonw_printf(json_wtr, "%s", buf);
 395		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
 396	}
 397
 398	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
 399
 400	if (info->jited_prog_len) {
 401		jsonw_bool_field(json_wtr, "jited", true);
 402		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
 403	} else {
 404		jsonw_bool_field(json_wtr, "jited", false);
 405	}
 406
 407	memlock = get_fdinfo(fd, "memlock");
 408	if (memlock)
 409		jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
 410	free(memlock);
 411
 412	if (info->nr_map_ids)
 413		show_prog_maps(fd, info->nr_map_ids);
 414
 415	if (info->btf_id)
 416		jsonw_int_field(json_wtr, "btf_id", info->btf_id);
 417
 418	if (!hash_empty(prog_table.table)) {
 419		struct pinned_obj *obj;
 420
 421		jsonw_name(json_wtr, "pinned");
 422		jsonw_start_array(json_wtr);
 423		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
 424			if (obj->id == info->id)
 425				jsonw_string(json_wtr, obj->path);
 426		}
 427		jsonw_end_array(json_wtr);
 428	}
 429
 430	emit_obj_refs_json(&refs_table, info->id, json_wtr);
 431
 432	show_prog_metadata(fd, info->nr_map_ids);
 433
 434	jsonw_end_object(json_wtr);
 435}
 436
 437static void print_prog_header_plain(struct bpf_prog_info *info)
 438{
 439	printf("%u: ", info->id);
 440	if (info->type < ARRAY_SIZE(prog_type_name))
 441		printf("%s  ", prog_type_name[info->type]);
 442	else
 443		printf("type %u  ", info->type);
 444
 445	if (*info->name)
 446		printf("name %s  ", info->name);
 447
 448	printf("tag ");
 449	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
 450	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
 451	printf("%s", info->gpl_compatible ? "  gpl" : "");
 452	if (info->run_time_ns)
 453		printf(" run_time_ns %lld run_cnt %lld",
 454		       info->run_time_ns, info->run_cnt);
 455	if (info->recursion_misses)
 456		printf(" recursion_misses %lld", info->recursion_misses);
 457	printf("\n");
 458}
 459
 460static void print_prog_plain(struct bpf_prog_info *info, int fd)
 461{
 462	char *memlock;
 463
 464	print_prog_header_plain(info);
 465
 466	if (info->load_time) {
 467		char buf[32];
 468
 469		print_boot_time(info->load_time, buf, sizeof(buf));
 470
 471		/* Piggy back on load_time, since 0 uid is a valid one */
 472		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
 473	}
 474
 475	printf("\txlated %uB", info->xlated_prog_len);
 476
 477	if (info->jited_prog_len)
 478		printf("  jited %uB", info->jited_prog_len);
 479	else
 480		printf("  not jited");
 481
 482	memlock = get_fdinfo(fd, "memlock");
 483	if (memlock)
 484		printf("  memlock %sB", memlock);
 485	free(memlock);
 486
 487	if (info->nr_map_ids)
 488		show_prog_maps(fd, info->nr_map_ids);
 489
 490	if (!hash_empty(prog_table.table)) {
 491		struct pinned_obj *obj;
 492
 493		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
 494			if (obj->id == info->id)
 495				printf("\n\tpinned %s", obj->path);
 496		}
 497	}
 498
 499	if (info->btf_id)
 500		printf("\n\tbtf_id %d", info->btf_id);
 501
 502	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
 503
 504	printf("\n");
 505
 506	show_prog_metadata(fd, info->nr_map_ids);
 507}
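/* Illustrative plain output assembled by print_prog_header_plain() and
 * print_prog_plain() (all values made up):
 *
 *   42: xdp  name xdp_prog  tag 6deef7357e7b4530  gpl
 *	loaded_at 2021-10-01T12:00:00+0000  uid 0
 *	xlated 248B  jited 160B  memlock 4096B  map_ids 13
 *	btf_id 87
 */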
 508
 509static int show_prog(int fd)
 510{
 511	struct bpf_prog_info info = {};
 512	__u32 len = sizeof(info);
 513	int err;
 514
 515	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 516	if (err) {
 517		p_err("can't get prog info: %s", strerror(errno));
 518		return -1;
 519	}
 520
 521	if (json_output)
 522		print_prog_json(&info, fd);
 523	else
 524		print_prog_plain(&info, fd);
 525
 526	return 0;
 527}
 528
 529static int do_show_subset(int argc, char **argv)
 530{
 531	int *fds = NULL;
 532	int nb_fds, i;
 533	int err = -1;
 534
 535	fds = malloc(sizeof(int));
 536	if (!fds) {
 537		p_err("mem alloc failed");
 538		return -1;
 539	}
 540	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 541	if (nb_fds < 1)
 542		goto exit_free;
 543
 544	if (json_output && nb_fds > 1)
 545		jsonw_start_array(json_wtr);	/* root array */
 546	for (i = 0; i < nb_fds; i++) {
 547		err = show_prog(fds[i]);
 548		if (err) {
 549			for (; i < nb_fds; i++)
 550				close(fds[i]);
 551			break;
 552		}
 553		close(fds[i]);
 554	}
 555	if (json_output && nb_fds > 1)
 556		jsonw_end_array(json_wtr);	/* root array */
 557
 558exit_free:
 559	free(fds);
 560	return err;
 561}
 562
 563static int do_show(int argc, char **argv)
 564{
 565	__u32 id = 0;
 566	int err;
 567	int fd;
 568
 569	if (show_pinned)
 570		build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
 571	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
 572
 573	if (argc == 2)
 574		return do_show_subset(argc, argv);
 575
 576	if (argc)
 577		return BAD_ARG();
 578
 579	if (json_output)
 580		jsonw_start_array(json_wtr);
 581	while (true) {
 582		err = bpf_prog_get_next_id(id, &id);
 583		if (err) {
 584			if (errno == ENOENT) {
 585				err = 0;
 586				break;
 587			}
 588			p_err("can't get next program: %s%s", strerror(errno),
 589			      errno == EINVAL ? " -- kernel too old?" : "");
 590			err = -1;
 591			break;
 592		}
 593
 594		fd = bpf_prog_get_fd_by_id(id);
 595		if (fd < 0) {
 596			if (errno == ENOENT)
 597				continue;
 598			p_err("can't get prog by id (%u): %s",
 599			      id, strerror(errno));
 600			err = -1;
 601			break;
 602		}
 603
 604		err = show_prog(fd);
 605		close(fd);
 606		if (err)
 607			break;
 608	}
 609
 610	if (json_output)
 611		jsonw_end_array(json_wtr);
 612
 613	delete_obj_refs_table(&refs_table);
 614
 615	return err;
 616}
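/* Example invocations handled by do_show() (IDs illustrative):
 *
 *   # bpftool prog show              - list every loaded program
 *   # bpftool prog show id 42        - show one program (do_show_subset())
 *   # bpftool -j prog show           - same listing as JSON
 */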
 617
 618static int
 619prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 620	  char *filepath, bool opcodes, bool visual, bool linum)
 621{
 622	struct bpf_prog_linfo *prog_linfo = NULL;
 623	const char *disasm_opt = NULL;
 624	struct dump_data dd = {};
 625	void *func_info = NULL;
 626	struct btf *btf = NULL;
 627	char func_sig[1024];
 628	unsigned char *buf;
 629	__u32 member_len;
 630	ssize_t n;
 631	int fd;
 632
 633	if (mode == DUMP_JITED) {
 634		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
 635			p_info("no instructions returned");
 636			return -1;
 637		}
 638		buf = u64_to_ptr(info->jited_prog_insns);
 639		member_len = info->jited_prog_len;
 640	} else {	/* DUMP_XLATED */
 641		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 642			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 643			return -1;
 644		}
 645		buf = u64_to_ptr(info->xlated_prog_insns);
 646		member_len = info->xlated_prog_len;
 647	}
 648
 649	if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
 650		p_err("failed to get btf");
 651		return -1;
 652	}
 653
 654	func_info = u64_to_ptr(info->func_info);
 655
 656	if (info->nr_line_info) {
 657		prog_linfo = bpf_prog_linfo__new(info);
 658		if (!prog_linfo)
 659			p_info("error in processing bpf_line_info.  continue without it.");
 660	}
 661
 662	if (filepath) {
 663		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 664		if (fd < 0) {
 665			p_err("can't open file %s: %s", filepath,
 666			      strerror(errno));
 667			return -1;
 668		}
 669
 670		n = write(fd, buf, member_len);
 671		close(fd);
 672		if (n != (ssize_t)member_len) {
 673			p_err("error writing output file: %s",
 674			      n < 0 ? strerror(errno) : "short write");
 675			return -1;
 676		}
 677
 678		if (json_output)
 679			jsonw_null(json_wtr);
 680	} else if (mode == DUMP_JITED) {
 681		const char *name = NULL;
 682
 683		if (info->ifindex) {
 684			name = ifindex_to_bfd_params(info->ifindex,
 685						     info->netns_dev,
 686						     info->netns_ino,
 687						     &disasm_opt);
 688			if (!name)
 689				return -1;
 690		}
 691
 692		if (info->nr_jited_func_lens && info->jited_func_lens) {
 693			struct kernel_sym *sym = NULL;
 694			struct bpf_func_info *record;
 695			char sym_name[SYM_MAX_NAME];
 696			unsigned char *img = buf;
 697			__u64 *ksyms = NULL;
 698			__u32 *lens;
 699			__u32 i;
 700			if (info->nr_jited_ksyms) {
 701				kernel_syms_load(&dd);
 702				ksyms = u64_to_ptr(info->jited_ksyms);
 703			}
 704
 705			if (json_output)
 706				jsonw_start_array(json_wtr);
 707
 708			lens = u64_to_ptr(info->jited_func_lens);
 709			for (i = 0; i < info->nr_jited_func_lens; i++) {
 710				if (ksyms) {
 711					sym = kernel_syms_search(&dd, ksyms[i]);
 712					if (sym)
 713						sprintf(sym_name, "%s", sym->name);
 714					else
 715						sprintf(sym_name, "0x%016llx", ksyms[i]);
 716				} else {
 717					strcpy(sym_name, "unknown");
 718				}
 719
 720				if (func_info) {
 721					record = func_info + i * info->func_info_rec_size;
 722					btf_dumper_type_only(btf, record->type_id,
 723							     func_sig,
 724							     sizeof(func_sig));
 725				}
 726
 727				if (json_output) {
 728					jsonw_start_object(json_wtr);
 729					if (func_info && func_sig[0] != '\0') {
 730						jsonw_name(json_wtr, "proto");
 731						jsonw_string(json_wtr, func_sig);
 732					}
 733					jsonw_name(json_wtr, "name");
 734					jsonw_string(json_wtr, sym_name);
 735					jsonw_name(json_wtr, "insns");
 736				} else {
 737					if (func_info && func_sig[0] != '\0')
 738						printf("%s:\n", func_sig);
 739					printf("%s:\n", sym_name);
 740				}
 741
 742				disasm_print_insn(img, lens[i], opcodes,
 743						  name, disasm_opt, btf,
 744						  prog_linfo, ksyms[i], i,
 745						  linum);
 746
 747				img += lens[i];
 748
 749				if (json_output)
 750					jsonw_end_object(json_wtr);
 751				else
 752					printf("\n");
 753			}
 754
 755			if (json_output)
 756				jsonw_end_array(json_wtr);
 757		} else {
 758			disasm_print_insn(buf, member_len, opcodes, name,
 759					  disasm_opt, btf, NULL, 0, 0, false);
 760		}
 761	} else if (visual) {
 762		if (json_output)
 763			jsonw_null(json_wtr);
 764		else
 765			dump_xlated_cfg(buf, member_len);
 766	} else {
 767		kernel_syms_load(&dd);
 768		dd.nr_jited_ksyms = info->nr_jited_ksyms;
 769		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
 770		dd.btf = btf;
 771		dd.func_info = func_info;
 772		dd.finfo_rec_size = info->func_info_rec_size;
 773		dd.prog_linfo = prog_linfo;
 774
 775		if (json_output)
 776			dump_xlated_json(&dd, buf, member_len, opcodes,
 777					 linum);
 778		else
 779			dump_xlated_plain(&dd, buf, member_len, opcodes,
 780					  linum);
 781		kernel_syms_destroy(&dd);
 782	}
 783
 784	btf__free(btf);
 785
 786	return 0;
 787}
 788
 789static int do_dump(int argc, char **argv)
 790{
 791	struct bpf_prog_info_linear *info_linear;
 792	char *filepath = NULL;
 793	bool opcodes = false;
 794	bool visual = false;
 795	enum dump_mode mode;
 796	bool linum = false;
 797	int *fds = NULL;
 798	int nb_fds, i = 0;
 799	int err = -1;
 800	__u64 arrays;
 801
 802	if (is_prefix(*argv, "jited")) {
 803		if (disasm_init())
 804			return -1;
 805		mode = DUMP_JITED;
 806	} else if (is_prefix(*argv, "xlated")) {
 807		mode = DUMP_XLATED;
 808	} else {
 809		p_err("expected 'xlated' or 'jited', got: %s", *argv);
 810		return -1;
 811	}
 812	NEXT_ARG();
 813
 814	if (argc < 2)
 815		usage();
 816
 817	fds = malloc(sizeof(int));
 818	if (!fds) {
 819		p_err("mem alloc failed");
 820		return -1;
 821	}
 822	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 823	if (nb_fds < 1)
 824		goto exit_free;
 825
 826	if (is_prefix(*argv, "file")) {
 827		NEXT_ARG();
 828		if (!argc) {
 829			p_err("expected file path");
 830			goto exit_close;
 831		}
 832		if (nb_fds > 1) {
 833			p_err("several programs matched");
 834			goto exit_close;
 835		}
 836
 837		filepath = *argv;
 838		NEXT_ARG();
 839	} else if (is_prefix(*argv, "opcodes")) {
 840		opcodes = true;
 841		NEXT_ARG();
 842	} else if (is_prefix(*argv, "visual")) {
 843		if (nb_fds > 1) {
 844			p_err("several programs matched");
 845			goto exit_close;
 846		}
 847
 848		visual = true;
 849		NEXT_ARG();
 850	} else if (is_prefix(*argv, "linum")) {
 851		linum = true;
 852		NEXT_ARG();
 853	}
 854
 855	if (argc) {
 856		usage();
 857		goto exit_close;
 858	}
 859
 860	if (mode == DUMP_JITED)
 861		arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
 862	else
 863		arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
 864
 865	arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
 866	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
 867	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
 868	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
 869	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
 870
 871	if (json_output && nb_fds > 1)
 872		jsonw_start_array(json_wtr);	/* root array */
 873	for (i = 0; i < nb_fds; i++) {
 874		info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
 875		if (IS_ERR_OR_NULL(info_linear)) {
 876			p_err("can't get prog info: %s", strerror(errno));
 877			break;
 878		}
 879
 880		if (json_output && nb_fds > 1) {
 881			jsonw_start_object(json_wtr);	/* prog object */
 882			print_prog_header_json(&info_linear->info);
 883			jsonw_name(json_wtr, "insns");
 884		} else if (nb_fds > 1) {
 885			print_prog_header_plain(&info_linear->info);
 886		}
 887
 888		err = prog_dump(&info_linear->info, mode, filepath, opcodes,
 889				visual, linum);
 890
 891		if (json_output && nb_fds > 1)
 892			jsonw_end_object(json_wtr);	/* prog object */
 893		else if (i != nb_fds - 1 && nb_fds > 1)
 894			printf("\n");
 895
 896		free(info_linear);
 897		if (err)
 898			break;
 899		close(fds[i]);
 900	}
 901	if (json_output && nb_fds > 1)
 902		jsonw_end_array(json_wtr);	/* root array */
 903
 904exit_close:
 905	for (; i < nb_fds; i++)
 906		close(fds[i]);
 907exit_free:
 908	free(fds);
 909	return err;
 910}
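/* Example invocations handled by do_dump() (IDs and paths illustrative):
 *
 *   # bpftool prog dump xlated id 42 opcodes
 *   # bpftool prog dump jited  id 42 linum
 *   # bpftool prog dump xlated id 42 file /tmp/insns.bin
 *
 * Only one of "file", "opcodes", "visual" and "linum" is accepted per
 * invocation; "visual" (xlated only) goes through dump_xlated_cfg() to
 * emit a control-flow graph instead of an instruction listing.
 */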
 911
 912static int do_pin(int argc, char **argv)
 913{
 914	int err;
 915
 916	err = do_pin_any(argc, argv, prog_parse_fd);
 917	if (!err && json_output)
 918		jsonw_null(json_wtr);
 919	return err;
 920}
 921
 922struct map_replace {
 923	int idx;
 924	int fd;
 925	char *name;
 926};
 927
 928static int map_replace_compar(const void *p1, const void *p2)
 929{
 930	const struct map_replace *a = p1, *b = p2;
 931
 932	return a->idx - b->idx;
 933}
 934
 935static int parse_attach_detach_args(int argc, char **argv, int *progfd,
 936				    enum bpf_attach_type *attach_type,
 937				    int *mapfd)
 938{
 939	if (!REQ_ARGS(3))
 940		return -EINVAL;
 941
 942	*progfd = prog_parse_fd(&argc, &argv);
 943	if (*progfd < 0)
 944		return *progfd;
 945
 946	*attach_type = parse_attach_type(*argv);
 947	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
 948		p_err("invalid attach/detach type");
 949		return -EINVAL;
 950	}
 951
 952	if (*attach_type == BPF_FLOW_DISSECTOR) {
 953		*mapfd = 0;
 954		return 0;
 955	}
 956
 957	NEXT_ARG();
 958	if (!REQ_ARGS(2))
 959		return -EINVAL;
 960
 961	*mapfd = map_parse_fd(&argc, &argv);
 962	if (*mapfd < 0)
 963		return *mapfd;
 964
 965	return 0;
 966}
 967
 968static int do_attach(int argc, char **argv)
 969{
 970	enum bpf_attach_type attach_type;
 971	int err, progfd;
 972	int mapfd;
 973
 974	err = parse_attach_detach_args(argc, argv,
 975				       &progfd, &attach_type, &mapfd);
 976	if (err)
 977		return err;
 978
 979	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
 980	if (err) {
 981		p_err("failed prog attach to map");
 982		return -EINVAL;
 983	}
 984
 985	if (json_output)
 986		jsonw_null(json_wtr);
 987	return 0;
 988}
 989
 990static int do_detach(int argc, char **argv)
 991{
 992	enum bpf_attach_type attach_type;
 993	int err, progfd;
 994	int mapfd;
 995
 996	err = parse_attach_detach_args(argc, argv,
 997				       &progfd, &attach_type, &mapfd);
 998	if (err)
 999		return err;
1000
1001	err = bpf_prog_detach2(progfd, mapfd, attach_type);
1002	if (err) {
1003		p_err("failed prog detach from map");
1004		return -EINVAL;
1005	}
1006
1007	if (json_output)
1008		jsonw_null(json_wtr);
1009	return 0;
1010}
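/* Example attach/detach invocations parsed by parse_attach_detach_args()
 * (pin paths illustrative):
 *
 *   # bpftool prog attach pinned /sys/fs/bpf/parser stream_parser \
 *         pinned /sys/fs/bpf/sockmap
 *   # bpftool prog detach pinned /sys/fs/bpf/parser stream_parser \
 *         pinned /sys/fs/bpf/sockmap
 *
 * For flow_dissector no map argument is expected (mapfd is forced to 0).
 */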
1011
1012static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1013{
1014	if (file_data_in && file_ctx_in &&
1015	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1016		p_err("cannot use standard input for both data_in and ctx_in");
1017		return -1;
1018	}
1019
1020	return 0;
1021}
1022
1023static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1024{
1025	size_t block_size = 256;
1026	size_t buf_size = block_size;
1027	size_t nb_read = 0;
1028	void *tmp;
1029	FILE *f;
1030
1031	if (!fname) {
1032		*data_ptr = NULL;
1033		*size = 0;
1034		return 0;
1035	}
1036
1037	if (!strcmp(fname, "-"))
1038		f = stdin;
1039	else
1040		f = fopen(fname, "r");
1041	if (!f) {
1042		p_err("failed to open %s: %s", fname, strerror(errno));
1043		return -1;
1044	}
1045
1046	*data_ptr = malloc(block_size);
1047	if (!*data_ptr) {
1048		p_err("failed to allocate memory for data_in/ctx_in: %s",
1049		      strerror(errno));
1050		goto err_fclose;
1051	}
1052
1053	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1054		if (feof(f))
1055			break;
1056		if (ferror(f)) {
1057			p_err("failed to read data_in/ctx_in from %s: %s",
1058			      fname, strerror(errno));
1059			goto err_free;
1060		}
1061		if (nb_read > buf_size - block_size) {
1062			if (buf_size == UINT32_MAX) {
1063				p_err("data_in/ctx_in is too long (max: %d)",
1064				      UINT32_MAX);
1065				goto err_free;
1066			}
1067			/* No space for fread()-ing next chunk; realloc() */
1068			buf_size *= 2;
1069			tmp = realloc(*data_ptr, buf_size);
1070			if (!tmp) {
1071				p_err("failed to reallocate data_in/ctx_in: %s",
1072				      strerror(errno));
1073				goto err_free;
1074			}
1075			*data_ptr = tmp;
1076		}
1077	}
1078	if (f != stdin)
1079		fclose(f);
1080
1081	*size = nb_read;
1082	return 0;
1083
1084err_free:
1085	free(*data_ptr);
1086	*data_ptr = NULL;
1087err_fclose:
1088	if (f != stdin)
1089		fclose(f);
1090	return -1;
1091}
1092
1093static void hex_print(void *data, unsigned int size, FILE *f)
1094{
1095	size_t i, j;
1096	char c;
1097
1098	for (i = 0; i < size; i += 16) {
1099		/* Row offset */
1100		fprintf(f, "%07zx\t", i);
1101
1102		/* Hexadecimal values */
1103		for (j = i; j < i + 16 && j < size; j++)
1104			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1105				j % 2 ? " " : "");
1106		for (; j < i + 16; j++)
1107			fprintf(f, "  %s", j % 2 ? " " : "");
1108
1109		/* ASCII values (if relevant), '.' otherwise */
1110		fprintf(f, "| ");
1111		for (j = i; j < i + 16 && j < size; j++) {
1112			c = *(char *)(data + j);
1113			if (c < ' ' || c > '~')
1114				c = '.';
1115			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1116		}
1117
1118		fprintf(f, "\n");
1119	}
1120}
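/* hex_print() produces xxd-like rows: a 7-digit hex offset, sixteen data
 * bytes in two-byte groups, then a printable-ASCII column, e.g.
 *
 *   0000000	7f45 4c46 0201 0100 0000 0000 0000 0000 | .ELF.... ........
 */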
1121
1122static int
1123print_run_output(void *data, unsigned int size, const char *fname,
1124		 const char *json_key)
1125{
1126	size_t nb_written;
1127	FILE *f;
1128
1129	if (!fname)
1130		return 0;
1131
1132	if (!strcmp(fname, "-")) {
1133		f = stdout;
1134		if (json_output) {
1135			jsonw_name(json_wtr, json_key);
1136			print_data_json(data, size);
1137		} else {
1138			hex_print(data, size, f);
1139		}
1140		return 0;
1141	}
1142
1143	f = fopen(fname, "w");
1144	if (!f) {
1145		p_err("failed to open %s: %s", fname, strerror(errno));
1146		return -1;
1147	}
1148
1149	nb_written = fwrite(data, 1, size, f);
1150	fclose(f);
1151	if (nb_written != size) {
1152		p_err("failed to write output data/ctx: %s", strerror(errno));
1153		return -1;
1154	}
1155
1156	return 0;
1157}
1158
1159static int alloc_run_data(void **data_ptr, unsigned int size_out)
1160{
1161	*data_ptr = calloc(size_out, 1);
1162	if (!*data_ptr) {
1163		p_err("failed to allocate memory for output data/ctx: %s",
1164		      strerror(errno));
1165		return -1;
1166	}
1167
1168	return 0;
1169}
1170
1171static int do_run(int argc, char **argv)
1172{
1173	char *data_fname_in = NULL, *data_fname_out = NULL;
1174	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
1175	struct bpf_prog_test_run_attr test_attr = {0};
1176	const unsigned int default_size = SZ_32K;
1177	void *data_in = NULL, *data_out = NULL;
1178	void *ctx_in = NULL, *ctx_out = NULL;
1179	unsigned int repeat = 1;
1180	int fd, err;
1181
1182	if (!REQ_ARGS(4))
1183		return -1;
1184
1185	fd = prog_parse_fd(&argc, &argv);
1186	if (fd < 0)
1187		return -1;
1188
1189	while (argc) {
1190		if (detect_common_prefix(*argv, "data_in", "data_out",
1191					 "data_size_out", NULL))
1192			return -1;
1193		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1194					 "ctx_size_out", NULL))
1195			return -1;
1196
1197		if (is_prefix(*argv, "data_in")) {
1198			NEXT_ARG();
1199			if (!REQ_ARGS(1))
1200				return -1;
1201
1202			data_fname_in = GET_ARG();
1203			if (check_single_stdin(data_fname_in, ctx_fname_in))
1204				return -1;
1205		} else if (is_prefix(*argv, "data_out")) {
1206			NEXT_ARG();
1207			if (!REQ_ARGS(1))
1208				return -1;
1209
1210			data_fname_out = GET_ARG();
1211		} else if (is_prefix(*argv, "data_size_out")) {
1212			char *endptr;
1213
1214			NEXT_ARG();
1215			if (!REQ_ARGS(1))
1216				return -1;
1217
1218			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1219			if (*endptr) {
1220				p_err("can't parse %s as output data size",
1221				      *argv);
1222				return -1;
1223			}
1224			NEXT_ARG();
1225		} else if (is_prefix(*argv, "ctx_in")) {
1226			NEXT_ARG();
1227			if (!REQ_ARGS(1))
1228				return -1;
1229
1230			ctx_fname_in = GET_ARG();
1231			if (check_single_stdin(data_fname_in, ctx_fname_in))
1232				return -1;
1233		} else if (is_prefix(*argv, "ctx_out")) {
1234			NEXT_ARG();
1235			if (!REQ_ARGS(1))
1236				return -1;
1237
1238			ctx_fname_out = GET_ARG();
1239		} else if (is_prefix(*argv, "ctx_size_out")) {
1240			char *endptr;
1241
1242			NEXT_ARG();
1243			if (!REQ_ARGS(1))
1244				return -1;
1245
1246			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1247			if (*endptr) {
1248				p_err("can't parse %s as output context size",
1249				      *argv);
1250				return -1;
1251			}
1252			NEXT_ARG();
1253		} else if (is_prefix(*argv, "repeat")) {
1254			char *endptr;
1255
1256			NEXT_ARG();
1257			if (!REQ_ARGS(1))
1258				return -1;
1259
1260			repeat = strtoul(*argv, &endptr, 0);
1261			if (*endptr) {
1262				p_err("can't parse %s as repeat number",
1263				      *argv);
1264				return -1;
1265			}
1266			NEXT_ARG();
1267		} else {
1268			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1269			      *argv);
1270			return -1;
1271		}
1272	}
1273
1274	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1275	if (err)
1276		return -1;
1277
1278	if (data_in) {
1279		if (!test_attr.data_size_out)
1280			test_attr.data_size_out = default_size;
1281		err = alloc_run_data(&data_out, test_attr.data_size_out);
1282		if (err)
1283			goto free_data_in;
1284	}
1285
1286	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1287	if (err)
1288		goto free_data_out;
1289
1290	if (ctx_in) {
1291		if (!test_attr.ctx_size_out)
1292			test_attr.ctx_size_out = default_size;
1293		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1294		if (err)
1295			goto free_ctx_in;
1296	}
1297
1298	test_attr.prog_fd	= fd;
1299	test_attr.repeat	= repeat;
1300	test_attr.data_in	= data_in;
1301	test_attr.data_out	= data_out;
1302	test_attr.ctx_in	= ctx_in;
1303	test_attr.ctx_out	= ctx_out;
1304
1305	err = bpf_prog_test_run_xattr(&test_attr);
1306	if (err) {
1307		p_err("failed to run program: %s", strerror(errno));
1308		goto free_ctx_out;
1309	}
1310
1311	err = 0;
1312
1313	if (json_output)
1314		jsonw_start_object(json_wtr);	/* root */
1315
1316	/* Do not exit on errors occurring when printing output data/context,
1317	 * we still want to print return value and duration for program run.
1318	 */
1319	if (test_attr.data_size_out)
1320		err += print_run_output(test_attr.data_out,
1321					test_attr.data_size_out,
1322					data_fname_out, "data_out");
1323	if (test_attr.ctx_size_out)
1324		err += print_run_output(test_attr.ctx_out,
1325					test_attr.ctx_size_out,
1326					ctx_fname_out, "ctx_out");
1327
1328	if (json_output) {
1329		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1330		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1331		jsonw_end_object(json_wtr);	/* root */
1332	} else {
1333		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1334			test_attr.retval,
1335			repeat > 1 ? " (average)" : "", test_attr.duration);
1336	}
1337
1338free_ctx_out:
1339	free(ctx_out);
1340free_ctx_in:
1341	free(ctx_in);
1342free_data_out:
1343	free(data_out);
1344free_data_in:
1345	free(data_in);
1346
1347	return err;
1348}
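/* Example test-run invocation handled by do_run() (file names
 * illustrative):
 *
 *   # bpftool prog run pinned /sys/fs/bpf/xdp_prog \
 *         data_in packet.bin data_out - repeat 100
 *
 * Output buffers default to 32 KiB (SZ_32K) unless data_size_out /
 * ctx_size_out is given; "-" means stdin for inputs and a hex dump to
 * stdout for outputs.
 */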
1349
1350static int
1351get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1352		      enum bpf_attach_type *expected_attach_type)
1353{
1354	libbpf_print_fn_t print_backup;
1355	int ret;
1356
1357	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1358	if (!ret)
1359		return ret;
1360
1361	/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1362	print_backup = libbpf_set_print(print_all_levels);
1363	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1364	libbpf_set_print(print_backup);
1365
1366	return ret;
1367}
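/* get_prog_type_by_name() resolves a type string (e.g. "xdp") exactly
 * like libbpf resolves ELF section names; if that fails, the lookup is
 * repeated with debug-level libbpf prints enabled so the user can see
 * which section names libbpf would accept.
 */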
1368
1369static int load_with_options(int argc, char **argv, bool first_prog_only)
1370{
1371	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1372	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1373		.relaxed_maps = relaxed_maps,
1374	);
1375	struct bpf_object_load_attr load_attr = { 0 };
1376	enum bpf_attach_type expected_attach_type;
1377	struct map_replace *map_replace = NULL;
1378	struct bpf_program *prog = NULL, *pos;
1379	unsigned int old_map_fds = 0;
1380	const char *pinmaps = NULL;
1381	struct bpf_object *obj;
1382	struct bpf_map *map;
1383	const char *pinfile;
1384	unsigned int i, j;
1385	__u32 ifindex = 0;
1386	const char *file;
1387	int idx, err;
1388
1389
1390	if (!REQ_ARGS(2))
1391		return -1;
1392	file = GET_ARG();
1393	pinfile = GET_ARG();
1394
1395	while (argc) {
1396		if (is_prefix(*argv, "type")) {
1397			char *type;
1398
1399			NEXT_ARG();
1400
1401			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1402				p_err("program type already specified");
1403				goto err_free_reuse_maps;
1404			}
1405			if (!REQ_ARGS(1))
1406				goto err_free_reuse_maps;
1407
1408			/* Put a '/' at the end of type to appease libbpf */
1409			type = malloc(strlen(*argv) + 2);
1410			if (!type) {
1411				p_err("mem alloc failed");
1412				goto err_free_reuse_maps;
1413			}
1414			*type = 0;
1415			strcat(type, *argv);
1416			strcat(type, "/");
1417
1418			err = get_prog_type_by_name(type, &common_prog_type,
1419						    &expected_attach_type);
1420			free(type);
1421			if (err < 0)
1422				goto err_free_reuse_maps;
1423
1424			NEXT_ARG();
1425		} else if (is_prefix(*argv, "map")) {
1426			void *new_map_replace;
1427			char *endptr, *name;
1428			int fd;
1429
1430			NEXT_ARG();
1431
1432			if (!REQ_ARGS(4))
1433				goto err_free_reuse_maps;
1434
1435			if (is_prefix(*argv, "idx")) {
1436				NEXT_ARG();
1437
1438				idx = strtoul(*argv, &endptr, 0);
1439				if (*endptr) {
1440					p_err("can't parse %s as IDX", *argv);
1441					goto err_free_reuse_maps;
1442				}
1443				name = NULL;
1444			} else if (is_prefix(*argv, "name")) {
1445				NEXT_ARG();
1446
1447				name = *argv;
1448				idx = -1;
1449			} else {
1450				p_err("expected 'idx' or 'name', got: '%s'?",
1451				      *argv);
1452				goto err_free_reuse_maps;
1453			}
1454			NEXT_ARG();
1455
1456			fd = map_parse_fd(&argc, &argv);
1457			if (fd < 0)
1458				goto err_free_reuse_maps;
1459
1460			new_map_replace = reallocarray(map_replace,
1461						       old_map_fds + 1,
1462						       sizeof(*map_replace));
1463			if (!new_map_replace) {
1464				p_err("mem alloc failed");
1465				goto err_free_reuse_maps;
1466			}
1467			map_replace = new_map_replace;
1468
1469			map_replace[old_map_fds].idx = idx;
1470			map_replace[old_map_fds].name = name;
1471			map_replace[old_map_fds].fd = fd;
1472			old_map_fds++;
1473		} else if (is_prefix(*argv, "dev")) {
1474			NEXT_ARG();
1475
1476			if (ifindex) {
1477				p_err("offload device already specified");
1478				goto err_free_reuse_maps;
1479			}
1480			if (!REQ_ARGS(1))
1481				goto err_free_reuse_maps;
1482
1483			ifindex = if_nametoindex(*argv);
1484			if (!ifindex) {
1485				p_err("unrecognized netdevice '%s': %s",
1486				      *argv, strerror(errno));
1487				goto err_free_reuse_maps;
1488			}
1489			NEXT_ARG();
1490		} else if (is_prefix(*argv, "pinmaps")) {
1491			NEXT_ARG();
1492
1493			if (!REQ_ARGS(1))
1494				goto err_free_reuse_maps;
1495
1496			pinmaps = GET_ARG();
1497		} else {
1498			p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
1499			      *argv);
1500			goto err_free_reuse_maps;
1501		}
1502	}
1503
1504	set_max_rlimit();
1505
1506	obj = bpf_object__open_file(file, &open_opts);
1507	if (libbpf_get_error(obj)) {
1508		p_err("failed to open object file");
1509		goto err_free_reuse_maps;
1510	}
1511
1512	bpf_object__for_each_program(pos, obj) {
1513		enum bpf_prog_type prog_type = common_prog_type;
1514
1515		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1516			const char *sec_name = bpf_program__section_name(pos);
1517
1518			err = get_prog_type_by_name(sec_name, &prog_type,
1519						    &expected_attach_type);
1520			if (err < 0)
1521				goto err_close_obj;
1522		}
1523
1524		bpf_program__set_ifindex(pos, ifindex);
1525		bpf_program__set_type(pos, prog_type);
1526		bpf_program__set_expected_attach_type(pos, expected_attach_type);
1527	}
1528
1529	qsort(map_replace, old_map_fds, sizeof(*map_replace),
1530	      map_replace_compar);
1531
1532	/* After the sort maps by name will be first on the list, because they
1533	 * have idx == -1.  Resolve them.
1534	 */
1535	j = 0;
1536	while (j < old_map_fds && map_replace[j].name) {
1537		i = 0;
1538		bpf_object__for_each_map(map, obj) {
1539			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1540				map_replace[j].idx = i;
1541				break;
1542			}
1543			i++;
1544		}
1545		if (map_replace[j].idx == -1) {
1546			p_err("unable to find map '%s'", map_replace[j].name);
1547			goto err_close_obj;
1548		}
1549		j++;
1550	}
1551	/* Resort if any names were resolved */
1552	if (j)
1553		qsort(map_replace, old_map_fds, sizeof(*map_replace),
1554		      map_replace_compar);
1555
1556	/* Set ifindex and name reuse */
1557	j = 0;
1558	idx = 0;
1559	bpf_object__for_each_map(map, obj) {
1560		if (!bpf_map__is_offload_neutral(map))
1561			bpf_map__set_ifindex(map, ifindex);
1562
1563		if (j < old_map_fds && idx == map_replace[j].idx) {
1564			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1565			if (err) {
1566				p_err("unable to set up map reuse: %d", err);
1567				goto err_close_obj;
1568			}
1569
1570			/* Next reuse wants to apply to the same map */
1571			if (j < old_map_fds && map_replace[j].idx == idx) {
1572				p_err("replacement for map idx %d specified more than once",
1573				      idx);
1574				goto err_close_obj;
1575			}
1576		}
1577
1578		idx++;
1579	}
1580	if (j < old_map_fds) {
1581		p_err("map idx '%d' not used", map_replace[j].idx);
1582		goto err_close_obj;
1583	}
1584
1585	load_attr.obj = obj;
1586	if (verifier_logs)
1587		/* log_level1 + log_level2 + stats, but not stable UAPI */
1588		load_attr.log_level = 1 + 2 + 4;
1589
1590	err = bpf_object__load_xattr(&load_attr);
1591	if (err) {
1592		p_err("failed to load object file");
1593		goto err_close_obj;
1594	}
1595
1596	err = mount_bpffs_for_pin(pinfile);
1597	if (err)
1598		goto err_close_obj;
1599
1600	if (first_prog_only) {
1601		prog = bpf_program__next(NULL, obj);
1602		if (!prog) {
1603			p_err("object file doesn't contain any bpf program");
1604			goto err_close_obj;
1605		}
1606
1607		err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1608		if (err) {
1609			p_err("failed to pin program %s",
1610			      bpf_program__section_name(prog));
1611			goto err_close_obj;
1612		}
1613	} else {
1614		err = bpf_object__pin_programs(obj, pinfile);
1615		if (err) {
1616			p_err("failed to pin all programs");
1617			goto err_close_obj;
1618		}
1619	}
1620
1621	if (pinmaps) {
1622		err = bpf_object__pin_maps(obj, pinmaps);
1623		if (err) {
1624			p_err("failed to pin all maps");
1625			goto err_unpin;
1626		}
1627	}
1628
1629	if (json_output)
1630		jsonw_null(json_wtr);
1631
1632	bpf_object__close(obj);
1633	for (i = 0; i < old_map_fds; i++)
1634		close(map_replace[i].fd);
1635	free(map_replace);
1636
1637	return 0;
1638
1639err_unpin:
1640	if (first_prog_only)
1641		unlink(pinfile);
1642	else
1643		bpf_object__unpin_programs(obj, pinfile);
1644err_close_obj:
1645	bpf_object__close(obj);
1646err_free_reuse_maps:
1647	for (i = 0; i < old_map_fds; i++)
1648		close(map_replace[i].fd);
1649	free(map_replace);
1650	return -1;
1651}
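/* Example load invocations handled by load_with_options() (object and
 * pin paths illustrative):
 *
 *   # bpftool prog load sample.o /sys/fs/bpf/sample
 *   # bpftool prog loadall sample.o /sys/fs/bpf/sample type xdp \
 *         map name stats id 17 pinmaps /sys/fs/bpf/sample_maps
 *
 * "load" pins only the first program of the object, "loadall" pins every
 * program under the given directory; "map" makes the object reuse an
 * already loaded map instead of creating a new one.
 */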
1652
1653static int count_open_fds(void)
1654{
1655	DIR *dp = opendir("/proc/self/fd");
1656	struct dirent *de;
1657	int cnt = -3;
1658
1659	if (!dp)
1660		return -1;
1661
1662	while ((de = readdir(dp)))
1663		cnt++;
1664
1665	closedir(dp);
1666	return cnt;
1667}
1668
1669static int try_loader(struct gen_loader_opts *gen)
1670{
1671	struct bpf_load_and_run_opts opts = {};
1672	struct bpf_loader_ctx *ctx;
1673	int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
1674					     sizeof(struct bpf_prog_desc));
1675	int log_buf_sz = (1u << 24) - 1;
1676	int err, fds_before, fd_delta;
1677	char *log_buf;
1678
1679	ctx = alloca(ctx_sz);
1680	memset(ctx, 0, ctx_sz);
1681	ctx->sz = ctx_sz;
1682	ctx->log_level = 1;
1683	ctx->log_size = log_buf_sz;
1684	log_buf = malloc(log_buf_sz);
1685	if (!log_buf)
1686		return -ENOMEM;
1687	ctx->log_buf = (long) log_buf;
1688	opts.ctx = ctx;
1689	opts.data = gen->data;
1690	opts.data_sz = gen->data_sz;
1691	opts.insns = gen->insns;
1692	opts.insns_sz = gen->insns_sz;
1693	fds_before = count_open_fds();
1694	err = bpf_load_and_run(&opts);
1695	fd_delta = count_open_fds() - fds_before;
1696	if (err < 0) {
1697		fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
1698		if (fd_delta)
1699			fprintf(stderr, "loader prog leaked %d FDs\n",
1700				fd_delta);
1701	}
1702	free(log_buf);
1703	return err;
1704}
1705
1706static int do_loader(int argc, char **argv)
1707{
1708	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
1709	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
1710	struct bpf_object_load_attr load_attr = {};
1711	struct bpf_object *obj;
1712	const char *file;
1713	int err = 0;
1714
1715	if (!REQ_ARGS(1))
1716		return -1;
1717	file = GET_ARG();
1718
1719	obj = bpf_object__open_file(file, &open_opts);
1720	if (libbpf_get_error(obj)) {
1721		p_err("failed to open object file");
1722		goto err_close_obj;
1723	}
1724
1725	err = bpf_object__gen_loader(obj, &gen);
1726	if (err)
1727		goto err_close_obj;
1728
1729	load_attr.obj = obj;
1730	if (verifier_logs)
1731		/* log_level1 + log_level2 + stats, but not stable UAPI */
1732		load_attr.log_level = 1 + 2 + 4;
1733
1734	err = bpf_object__load_xattr(&load_attr);
1735	if (err) {
1736		p_err("failed to load object file");
1737		goto err_close_obj;
1738	}
1739
1740	if (verifier_logs) {
1741		struct dump_data dd = {};
1742
1743		kernel_syms_load(&dd);
1744		dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
1745		kernel_syms_destroy(&dd);
1746	}
1747	err = try_loader(&gen);
1748err_close_obj:
1749	bpf_object__close(obj);
1750	return err;
1751}
1752
1753static int do_load(int argc, char **argv)
1754{
1755	if (use_loader)
1756		return do_loader(argc, argv);
1757	return load_with_options(argc, argv, true);
1758}
1759
1760static int do_loadall(int argc, char **argv)
1761{
1762	return load_with_options(argc, argv, false);
1763}
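/* When bpftool runs with its use-loader option (use_loader set, -L on
 * recent bpftool versions), do_load() goes through do_loader() instead:
 * bpf_object__gen_loader() makes libbpf emit a loader program plus
 * embedded metadata, and try_loader() executes it via bpf_load_and_run(),
 * reporting any leaked FDs on failure.
 */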
1764
1765#ifdef BPFTOOL_WITHOUT_SKELETONS
1766
1767static int do_profile(int argc, char **argv)
1768{
1769	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1770	return 0;
1771}
1772
1773#else /* BPFTOOL_WITHOUT_SKELETONS */
1774
1775#include "profiler.skel.h"
1776
1777struct profile_metric {
1778	const char *name;
1779	struct bpf_perf_event_value val;
1780	struct perf_event_attr attr;
1781	bool selected;
1782
1783	/* calculate ratios like instructions per cycle */
1784	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1785	const char *ratio_desc;
1786	const float ratio_mul;
1787} metrics[] = {
1788	{
1789		.name = "cycles",
1790		.attr = {
1791			.type = PERF_TYPE_HARDWARE,
1792			.config = PERF_COUNT_HW_CPU_CYCLES,
1793			.exclude_user = 1,
1794		},
1795	},
1796	{
1797		.name = "instructions",
1798		.attr = {
1799			.type = PERF_TYPE_HARDWARE,
1800			.config = PERF_COUNT_HW_INSTRUCTIONS,
1801			.exclude_user = 1,
1802		},
1803		.ratio_metric = 1,
1804		.ratio_desc = "insns per cycle",
1805		.ratio_mul = 1.0,
1806	},
1807	{
1808		.name = "l1d_loads",
1809		.attr = {
1810			.type = PERF_TYPE_HW_CACHE,
1811			.config =
1812				PERF_COUNT_HW_CACHE_L1D |
1813				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1814				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
1815			.exclude_user = 1,
1816		},
1817	},
1818	{
1819		.name = "llc_misses",
1820		.attr = {
1821			.type = PERF_TYPE_HW_CACHE,
1822			.config =
1823				PERF_COUNT_HW_CACHE_LL |
1824				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1825				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1826			.exclude_user = 1
1827		},
1828		.ratio_metric = 2,
1829		.ratio_desc = "LLC misses per million insns",
1830		.ratio_mul = 1e6,
1831	},
1832	{
1833		.name = "itlb_misses",
1834		.attr = {
1835			.type = PERF_TYPE_HW_CACHE,
1836			.config =
1837				PERF_COUNT_HW_CACHE_ITLB |
1838				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1839				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1840			.exclude_user = 1
1841		},
1842		.ratio_metric = 2,
1843		.ratio_desc = "itlb misses per million insns",
1844		.ratio_mul = 1e6,
1845	},
1846	{
1847		.name = "dtlb_misses",
1848		.attr = {
1849			.type = PERF_TYPE_HW_CACHE,
1850			.config =
1851				PERF_COUNT_HW_CACHE_DTLB |
1852				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1853				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1854			.exclude_user = 1
1855		},
1856		.ratio_metric = 2,
1857		.ratio_desc = "dtlb misses per million insns",
1858		.ratio_mul = 1e6,
1859	},
1860};
1861
1862static __u64 profile_total_count;
1863
1864#define MAX_NUM_PROFILE_METRICS 4
1865
1866static int profile_parse_metrics(int argc, char **argv)
1867{
1868	unsigned int metric_cnt;
1869	int selected_cnt = 0;
1870	unsigned int i;
1871
1872	metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
1873
1874	while (argc > 0) {
1875		for (i = 0; i < metric_cnt; i++) {
1876			if (is_prefix(argv[0], metrics[i].name)) {
1877				if (!metrics[i].selected)
1878					selected_cnt++;
1879				metrics[i].selected = true;
1880				break;
1881			}
1882		}
1883		if (i == metric_cnt) {
1884			p_err("unknown metric %s", argv[0]);
1885			return -1;
1886		}
1887		NEXT_ARG();
1888	}
1889	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
 1890		p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
1891		      selected_cnt, MAX_NUM_PROFILE_METRICS);
1892		return -1;
1893	}
1894	return selected_cnt;
1895}
1896
1897static void profile_read_values(struct profiler_bpf *obj)
1898{
1899	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
1900	int reading_map_fd, count_map_fd;
1901	__u64 counts[num_cpu];
1902	__u32 key = 0;
1903	int err;
1904
1905	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
1906	count_map_fd = bpf_map__fd(obj->maps.counts);
1907	if (reading_map_fd < 0 || count_map_fd < 0) {
1908		p_err("failed to get fd for map");
1909		return;
1910	}
1911
1912	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
1913	if (err) {
1914		p_err("failed to read count_map: %s", strerror(errno));
1915		return;
1916	}
1917
1918	profile_total_count = 0;
1919	for (cpu = 0; cpu < num_cpu; cpu++)
1920		profile_total_count += counts[cpu];
1921
1922	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1923		struct bpf_perf_event_value values[num_cpu];
1924
1925		if (!metrics[m].selected)
1926			continue;
1927
1928		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
1929		if (err) {
1930			p_err("failed to read reading_map: %s",
1931			      strerror(errno));
1932			return;
1933		}
1934		for (cpu = 0; cpu < num_cpu; cpu++) {
1935			metrics[m].val.counter += values[cpu].counter;
1936			metrics[m].val.enabled += values[cpu].enabled;
1937			metrics[m].val.running += values[cpu].running;
1938		}
1939		key++;
1940	}
1941}
1942
1943static void profile_print_readings_json(void)
1944{
1945	__u32 m;
1946
1947	jsonw_start_array(json_wtr);
1948	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1949		if (!metrics[m].selected)
1950			continue;
1951		jsonw_start_object(json_wtr);
1952		jsonw_string_field(json_wtr, "metric", metrics[m].name);
1953		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
1954		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
1955		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
1956		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
1957
1958		jsonw_end_object(json_wtr);
1959	}
1960	jsonw_end_array(json_wtr);
1961}
1962
1963static void profile_print_readings_plain(void)
1964{
1965	__u32 m;
1966
1967	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
1968	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1969		struct bpf_perf_event_value *val = &metrics[m].val;
1970		int r;
1971
1972		if (!metrics[m].selected)
1973			continue;
1974		printf("%18llu %-20s", val->counter, metrics[m].name);
1975
1976		r = metrics[m].ratio_metric - 1;
1977		if (r >= 0 && metrics[r].selected &&
1978		    metrics[r].val.counter > 0) {
1979			printf("# %8.2f %-30s",
1980			       val->counter * metrics[m].ratio_mul /
1981			       metrics[r].val.counter,
1982			       metrics[m].ratio_desc);
1983		} else {
1984			printf("%-41s", "");
1985		}
1986
1987		if (val->enabled > val->running)
1988			printf("(%4.2f%%)",
1989			       val->running * 100.0 / val->enabled);
1990		printf("\n");
1991	}
1992}
1993
1994static void profile_print_readings(void)
1995{
1996	if (json_output)
1997		profile_print_readings_json();
1998	else
1999		profile_print_readings_plain();
2000}
2001
2002static char *profile_target_name(int tgt_fd)
2003{
2004	struct bpf_prog_info_linear *info_linear;
2005	struct bpf_func_info *func_info;
2006	const struct btf_type *t;
2007	struct btf *btf = NULL;
2008	char *name = NULL;
2009
2010	info_linear = bpf_program__get_prog_info_linear(
2011		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
2012	if (IS_ERR_OR_NULL(info_linear)) {
2013		p_err("failed to get info_linear for prog FD %d", tgt_fd);
2014		return NULL;
2015	}
2016
2017	if (info_linear->info.btf_id == 0 ||
2018	    btf__get_from_id(info_linear->info.btf_id, &btf)) {
2019		p_err("prog FD %d doesn't have valid btf", tgt_fd);
2020		goto out;
2021	}
2022
2023	func_info = u64_to_ptr(info_linear->info.func_info);
2024	t = btf__type_by_id(btf, func_info[0].type_id);
2025	if (!t) {
2026		p_err("btf %d doesn't have type %d",
2027		      info_linear->info.btf_id, func_info[0].type_id);
2028		goto out;
2029	}
2030	name = strdup(btf__name_by_offset(btf, t->name_off));
2031out:
2032	btf__free(btf);
2033	free(info_linear);
2034	return name;
2035}
2036
2037static struct profiler_bpf *profile_obj;
2038static int profile_tgt_fd = -1;
2039static char *profile_tgt_name;
2040static int *profile_perf_events;
2041static int profile_perf_event_cnt;
2042
2043static void profile_close_perf_events(struct profiler_bpf *obj)
2044{
2045	int i;
2046
2047	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
2048		close(profile_perf_events[i]);
2049
2050	free(profile_perf_events);
2051	profile_perf_event_cnt = 0;
2052}
2053
2054static int profile_open_perf_events(struct profiler_bpf *obj)
2055{
2056	unsigned int cpu, m;
2057	int map_fd, pmu_fd;
2058
2059	profile_perf_events = calloc(
2060		sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
2061	if (!profile_perf_events) {
2062		p_err("failed to allocate memory for perf_event array: %s",
2063		      strerror(errno));
2064		return -1;
2065	}
2066	map_fd = bpf_map__fd(obj->maps.events);
2067	if (map_fd < 0) {
2068		p_err("failed to get fd for events map");
2069		return -1;
2070	}
2071
2072	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2073		if (!metrics[m].selected)
2074			continue;
2075		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
2076			pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
2077					 -1/*pid*/, cpu, -1/*group_fd*/, 0);
2078			if (pmu_fd < 0 ||
2079			    bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
2080						&pmu_fd, BPF_ANY) ||
2081			    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
2082				p_err("failed to create event %s on cpu %d",
2083				      metrics[m].name, cpu);
2084				return -1;
2085			}
2086			profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
2087		}
2088	}
2089	return 0;
2090}
2091
2092static void profile_print_and_cleanup(void)
2093{
2094	profile_close_perf_events(profile_obj);
2095	profile_read_values(profile_obj);
2096	profile_print_readings();
2097	profiler_bpf__destroy(profile_obj);
2098
2099	close(profile_tgt_fd);
2100	free(profile_tgt_name);
2101}
2102
2103static void int_exit(int signo)
2104{
2105	profile_print_and_cleanup();
2106	exit(0);
2107}
2108
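/*
 * Example invocation (the program id is hypothetical):
 *   # bpftool prog profile id 42 duration 10 cycles instructions llc_misses
 * profiles program 42 for 10 seconds and prints one line per selected metric.
 */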
2109static int do_profile(int argc, char **argv)
2110{
2111	int num_metric, num_cpu, err = -1;
2112	struct bpf_program *prog;
2113	unsigned long duration;
2114	char *endptr;
2115
2116	/* we need at least two args: the prog and one metric */
2117	if (!REQ_ARGS(3))
2118		return -EINVAL;
2119
2120	/* parse target fd */
2121	profile_tgt_fd = prog_parse_fd(&argc, &argv);
2122	if (profile_tgt_fd < 0) {
2123		p_err("failed to parse fd");
2124		return -1;
2125	}
2126
2127	/* parse optional profiling duration */
2128	if (argc > 2 && is_prefix(argv[0], "duration")) {
2129		NEXT_ARG();
2130		duration = strtoul(*argv, &endptr, 0);
2131		if (*endptr)
2132			usage();
2133		NEXT_ARG();
2134	} else {
2135		duration = UINT_MAX;
2136	}
2137
2138	num_metric = profile_parse_metrics(argc, argv);
2139	if (num_metric <= 0)
2140		goto out;
2141
2142	num_cpu = libbpf_num_possible_cpus();
2143	if (num_cpu <= 0) {
2144		p_err("failed to identify number of CPUs");
2145		goto out;
2146	}
2147
2148	profile_obj = profiler_bpf__open();
2149	if (!profile_obj) {
2150		p_err("failed to open and/or load BPF object");
2151		goto out;
2152	}
2153
2154	profile_obj->rodata->num_cpu = num_cpu;
2155	profile_obj->rodata->num_metric = num_metric;
2156
2157	/* adjust map sizes */
2158	bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
2159	bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
2160	bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
2161	bpf_map__resize(profile_obj->maps.counts, 1);
2162
2163	/* change target name */
2164	profile_tgt_name = profile_target_name(profile_tgt_fd);
2165	if (!profile_tgt_name)
2166		goto out;
2167
2168	bpf_object__for_each_program(prog, profile_obj->obj) {
2169		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2170						     profile_tgt_name);
2171		if (err) {
2172			p_err("failed to set attach target");
2173			goto out;
2174		}
2175	}
2176
2177	set_max_rlimit();
2178	err = profiler_bpf__load(profile_obj);
2179	if (err) {
2180		p_err("failed to load profile_obj");
2181		goto out;
2182	}
2183
2184	err = profile_open_perf_events(profile_obj);
2185	if (err)
2186		goto out;
2187
2188	err = profiler_bpf__attach(profile_obj);
2189	if (err) {
2190		p_err("failed to attach profile_obj");
2191		goto out;
2192	}
2193	signal(SIGINT, int_exit);
2194
2195	sleep(duration);
2196	profile_print_and_cleanup();
2197	return 0;
2198
2199out:
2200	profile_close_perf_events(profile_obj);
2201	if (profile_obj)
2202		profiler_bpf__destroy(profile_obj);
2203	close(profile_tgt_fd);
2204	free(profile_tgt_name);
2205	return err;
2206}
2207
2208#endif /* BPFTOOL_WITHOUT_SKELETONS */
2209
2210static int do_help(int argc, char **argv)
2211{
2212	if (json_output) {
2213		jsonw_null(json_wtr);
2214		return 0;
2215	}
2216
2217	fprintf(stderr,
2218		"Usage: %1$s %2$s { show | list } [PROG]\n"
2219		"       %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
2220		"       %1$s %2$s dump jited  PROG [{ file FILE | opcodes | linum }]\n"
2221		"       %1$s %2$s pin   PROG FILE\n"
2222		"       %1$s %2$s { load | loadall } OBJ  PATH \\\n"
2223		"                         [type TYPE] [dev NAME] \\\n"
2224		"                         [map { idx IDX | name NAME } MAP]\\\n"
2225		"                         [pinmaps MAP_DIR]\n"
2226		"       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2227		"       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2228		"       %1$s %2$s run PROG \\\n"
2229		"                         data_in FILE \\\n"
2230		"                         [data_out FILE [data_size_out L]] \\\n"
2231		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2232		"                         [repeat N]\n"
2233		"       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2234		"       %1$s %2$s tracelog\n"
2235		"       %1$s %2$s help\n"
2236		"\n"
2237		"       " HELP_SPEC_MAP "\n"
2238		"       " HELP_SPEC_PROGRAM "\n"
2239		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2240		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2241		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2242		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2243		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2244		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2245		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2246		"                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
2247		"                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
2248		"                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
2249		"                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
2250		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2251		"       ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
2252		"                        flow_dissector }\n"
2253		"       METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
2254		"       " HELP_SPEC_OPTIONS "\n"
2255		"",
2256		bin_name, argv[-2]);
2257
2258	return 0;
2259}
2260
2261static const struct cmd cmds[] = {
2262	{ "show",	do_show },
2263	{ "list",	do_show },
2264	{ "help",	do_help },
2265	{ "dump",	do_dump },
2266	{ "pin",	do_pin },
2267	{ "load",	do_load },
2268	{ "loadall",	do_loadall },
2269	{ "attach",	do_attach },
2270	{ "detach",	do_detach },
2271	{ "tracelog",	do_tracelog },
2272	{ "run",	do_run },
2273	{ "profile",	do_profile },
2274	{ 0 }
2275};
2276
2277int do_prog(int argc, char **argv)
2278{
2279	return cmd_select(cmds, argc, argv, do_help);
2280}
v6.2
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3
   4#ifndef _GNU_SOURCE
   5#define _GNU_SOURCE
   6#endif
   7#include <errno.h>
   8#include <fcntl.h>
   9#include <signal.h>
  10#include <stdarg.h>
  11#include <stdio.h>
  12#include <stdlib.h>
  13#include <string.h>
  14#include <time.h>
  15#include <unistd.h>
  16#include <net/if.h>
  17#include <sys/ioctl.h>
  18#include <sys/types.h>
  19#include <sys/stat.h>
  20#include <sys/syscall.h>
  21#include <dirent.h>
  22
  23#include <linux/err.h>
  24#include <linux/perf_event.h>
  25#include <linux/sizes.h>
  26
  27#include <bpf/bpf.h>
  28#include <bpf/btf.h>
  29#include <bpf/hashmap.h>
  30#include <bpf/libbpf.h>
  31#include <bpf/libbpf_internal.h>
  32#include <bpf/skel_internal.h>
  33
  34#include "cfg.h"
  35#include "main.h"
  36#include "xlated_dumper.h"
  37
  38#define BPF_METADATA_PREFIX "bpf_metadata_"
  39#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
  40
  41enum dump_mode {
  42	DUMP_JITED,
  43	DUMP_XLATED,
  44};
  45
  46static const bool attach_types[] = {
  47	[BPF_SK_SKB_STREAM_PARSER] = true,
  48	[BPF_SK_SKB_STREAM_VERDICT] = true,
  49	[BPF_SK_SKB_VERDICT] = true,
  50	[BPF_SK_MSG_VERDICT] = true,
  51	[BPF_FLOW_DISSECTOR] = true,
  52	[__MAX_BPF_ATTACH_TYPE] = false,
  53};
  54
  55/* Textual representations traditionally used by the program and kept around
  56 * for the sake of backwards compatibility.
  57 */
  58static const char * const attach_type_strings[] = {
  59	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
  60	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
  61	[BPF_SK_SKB_VERDICT] = "skb_verdict",
  62	[BPF_SK_MSG_VERDICT] = "msg_verdict",
  63	[__MAX_BPF_ATTACH_TYPE] = NULL,
  64};
  65
  66static struct hashmap *prog_table;
  67
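/*
 * Accept both the canonical libbpf attach type names (exact match) and the
 * legacy bpftool strings above (prefix match). Returns __MAX_BPF_ATTACH_TYPE
 * when the string matches neither.
 */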
  68static enum bpf_attach_type parse_attach_type(const char *str)
  69{
  70	enum bpf_attach_type type;
  71
  72	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
  73		if (attach_types[type]) {
  74			const char *attach_type_str;
  75
  76			attach_type_str = libbpf_bpf_attach_type_str(type);
  77			if (!strcmp(str, attach_type_str))
  78				return type;
  79		}
  80
  81		if (attach_type_strings[type] &&
  82		    is_prefix(str, attach_type_strings[type]))
  83			return type;
  84	}
  85
  86	return __MAX_BPF_ATTACH_TYPE;
  87}
  88
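/*
 * Second half of the usual two-call bpf_obj_get_info_by_fd() pattern: the
 * caller has already queried the kernel once to learn the sizes of the
 * variable-length arrays; this helper grows a single backing buffer and
 * points the relevant info fields (instructions, ksyms, func/line info)
 * into it so that a second query can fill them in.
 */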
  89static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
  90			  void **info_data, size_t *const info_data_sz)
  91{
  92	struct bpf_prog_info holder = {};
  93	size_t needed = 0;
  94	void *ptr;
  95
  96	if (mode == DUMP_JITED) {
  97		holder.jited_prog_len = info->jited_prog_len;
  98		needed += info->jited_prog_len;
  99	} else {
 100		holder.xlated_prog_len = info->xlated_prog_len;
 101		needed += info->xlated_prog_len;
 102	}
 103
 104	holder.nr_jited_ksyms = info->nr_jited_ksyms;
 105	needed += info->nr_jited_ksyms * sizeof(__u64);
 106
 107	holder.nr_jited_func_lens = info->nr_jited_func_lens;
 108	needed += info->nr_jited_func_lens * sizeof(__u32);
 109
 110	holder.nr_func_info = info->nr_func_info;
 111	holder.func_info_rec_size = info->func_info_rec_size;
 112	needed += info->nr_func_info * info->func_info_rec_size;
 113
 114	holder.nr_line_info = info->nr_line_info;
 115	holder.line_info_rec_size = info->line_info_rec_size;
 116	needed += info->nr_line_info * info->line_info_rec_size;
 117
 118	holder.nr_jited_line_info = info->nr_jited_line_info;
 119	holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
 120	needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
 121
 122	if (needed > *info_data_sz) {
 123		ptr = realloc(*info_data, needed);
 124		if (!ptr)
 125			return -1;
 126
 127		*info_data = ptr;
 128		*info_data_sz = needed;
 129	}
 130	ptr = *info_data;
 131
 132	if (mode == DUMP_JITED) {
 133		holder.jited_prog_insns = ptr_to_u64(ptr);
 134		ptr += holder.jited_prog_len;
 135	} else {
 136		holder.xlated_prog_insns = ptr_to_u64(ptr);
 137		ptr += holder.xlated_prog_len;
 138	}
 139
 140	holder.jited_ksyms = ptr_to_u64(ptr);
 141	ptr += holder.nr_jited_ksyms * sizeof(__u64);
 142
 143	holder.jited_func_lens = ptr_to_u64(ptr);
 144	ptr += holder.nr_jited_func_lens * sizeof(__u32);
 145
 146	holder.func_info = ptr_to_u64(ptr);
 147	ptr += holder.nr_func_info * holder.func_info_rec_size;
 148
 149	holder.line_info = ptr_to_u64(ptr);
 150	ptr += holder.nr_line_info * holder.line_info_rec_size;
 151
 152	holder.jited_line_info = ptr_to_u64(ptr);
 153	ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
 154
 155	*info = holder;
 156	return 0;
 157}
 158
 159static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
 160{
 161	struct timespec real_time_ts, boot_time_ts;
 162	time_t wallclock_secs;
 163	struct tm load_tm;
 164
 165	buf[--size] = '\0';
 166
 167	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
 168	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
 169		perror("Can't read clocks");
 170		snprintf(buf, size, "%llu", nsecs / 1000000000);
 171		return;
 172	}
 173
 174	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
 175		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
 176		1000000000;
 177
 178
 179	if (!localtime_r(&wallclock_secs, &load_tm)) {
 180		snprintf(buf, size, "%llu", nsecs / 1000000000);
 181		return;
 182	}
 183
 184	if (json_output)
 185		strftime(buf, size, "%s", &load_tm);
 186	else
 187		strftime(buf, size, "%FT%T%z", &load_tm);
 188}
 189
 190static void show_prog_maps(int fd, __u32 num_maps)
 191{
 192	struct bpf_prog_info info = {};
 193	__u32 len = sizeof(info);
 194	__u32 map_ids[num_maps];
 195	unsigned int i;
 196	int err;
 197
 198	info.nr_map_ids = num_maps;
 199	info.map_ids = ptr_to_u64(map_ids);
 200
 201	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 202	if (err || !info.nr_map_ids)
 203		return;
 204
 205	if (json_output) {
 206		jsonw_name(json_wtr, "map_ids");
 207		jsonw_start_array(json_wtr);
 208		for (i = 0; i < info.nr_map_ids; i++)
 209			jsonw_uint(json_wtr, map_ids[i]);
 210		jsonw_end_array(json_wtr);
 211	} else {
 212		printf("  map_ids ");
 213		for (i = 0; i < info.nr_map_ids; i++)
 214			printf("%u%s", map_ids[i],
 215			       i == info.nr_map_ids - 1 ? "" : ",");
 216	}
 217}
 218
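/*
 * Walk the maps referenced by the program and return a malloc()'d copy of
 * the value of the first single-entry ".rodata" array map that carries BTF.
 * This is where the "bpf_metadata_"-prefixed variables shown by
 * show_prog_metadata() live. The caller frees the returned buffer.
 */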
 219static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
 220{
 221	struct bpf_prog_info prog_info;
 222	__u32 prog_info_len;
 223	__u32 map_info_len;
 224	void *value = NULL;
 225	__u32 *map_ids;
 226	int nr_maps;
 227	int key = 0;
 228	int map_fd;
 229	int ret;
 230	__u32 i;
 231
 232	memset(&prog_info, 0, sizeof(prog_info));
 233	prog_info_len = sizeof(prog_info);
 234	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 235	if (ret)
 236		return NULL;
 237
 238	if (!prog_info.nr_map_ids)
 239		return NULL;
 240
 241	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
 242	if (!map_ids)
 243		return NULL;
 244
 245	nr_maps = prog_info.nr_map_ids;
 246	memset(&prog_info, 0, sizeof(prog_info));
 247	prog_info.nr_map_ids = nr_maps;
 248	prog_info.map_ids = ptr_to_u64(map_ids);
 249	prog_info_len = sizeof(prog_info);
 250
 251	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 252	if (ret)
 253		goto free_map_ids;
 254
 255	for (i = 0; i < prog_info.nr_map_ids; i++) {
 256		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
 257		if (map_fd < 0)
 258			goto free_map_ids;
 259
 260		memset(map_info, 0, sizeof(*map_info));
 261		map_info_len = sizeof(*map_info);
 262		ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
 263		if (ret < 0) {
 264			close(map_fd);
 265			goto free_map_ids;
 266		}
 267
 268		if (map_info->type != BPF_MAP_TYPE_ARRAY ||
 269		    map_info->key_size != sizeof(int) ||
 270		    map_info->max_entries != 1 ||
 271		    !map_info->btf_value_type_id ||
 272		    !strstr(map_info->name, ".rodata")) {
 273			close(map_fd);
 274			continue;
 275		}
 276
 277		value = malloc(map_info->value_size);
 278		if (!value) {
 279			close(map_fd);
 280			goto free_map_ids;
 281		}
 282
 283		if (bpf_map_lookup_elem(map_fd, &key, value)) {
 284			close(map_fd);
 285			free(value);
 286			value = NULL;
 287			goto free_map_ids;
 288		}
 289
 290		close(map_fd);
 291		break;
 292	}
 293
 294free_map_ids:
 295	free(map_ids);
 296	return value;
 297}
 298
 299static bool has_metadata_prefix(const char *s)
 300{
 301	return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
 302}
 303
 304static void show_prog_metadata(int fd, __u32 num_maps)
 305{
 306	const struct btf_type *t_datasec, *t_var;
 307	struct bpf_map_info map_info;
 308	struct btf_var_secinfo *vsi;
 309	bool printed_header = false;
 310	unsigned int i, vlen;
 311	void *value = NULL;
 312	const char *name;
 313	struct btf *btf;
 314	int err;
 315
 316	if (!num_maps)
 317		return;
 318
 319	memset(&map_info, 0, sizeof(map_info));
 320	value = find_metadata(fd, &map_info);
 321	if (!value)
 322		return;
 323
 324	btf = btf__load_from_kernel_by_id(map_info.btf_id);
 325	if (!btf)
 326		goto out_free;
 327
 328	t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
 329	if (!btf_is_datasec(t_datasec))
 330		goto out_free;
 331
 332	vlen = btf_vlen(t_datasec);
 333	vsi = btf_var_secinfos(t_datasec);
 334
 335	/* We don't proceed to check the kinds of the elements of the DATASEC.
 336	 * The verifier enforces them to be BTF_KIND_VAR.
 337	 */
 338
 339	if (json_output) {
 340		struct btf_dumper d = {
 341			.btf = btf,
 342			.jw = json_wtr,
 343			.is_plain_text = false,
 344		};
 345
 346		for (i = 0; i < vlen; i++, vsi++) {
 347			t_var = btf__type_by_id(btf, vsi->type);
 348			name = btf__name_by_offset(btf, t_var->name_off);
 349
 350			if (!has_metadata_prefix(name))
 351				continue;
 352
 353			if (!printed_header) {
 354				jsonw_name(json_wtr, "metadata");
 355				jsonw_start_object(json_wtr);
 356				printed_header = true;
 357			}
 358
 359			jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
 360			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 361			if (err) {
 362				p_err("btf dump failed: %d", err);
 363				break;
 364			}
 365		}
 366		if (printed_header)
 367			jsonw_end_object(json_wtr);
 368	} else {
 369		json_writer_t *btf_wtr;
 370		struct btf_dumper d = {
 371			.btf = btf,
 372			.is_plain_text = true,
 373		};
 374
 375		for (i = 0; i < vlen; i++, vsi++) {
 376			t_var = btf__type_by_id(btf, vsi->type);
 377			name = btf__name_by_offset(btf, t_var->name_off);
 378
 379			if (!has_metadata_prefix(name))
 380				continue;
 381
 382			if (!printed_header) {
 383				printf("\tmetadata:");
 384
 385				btf_wtr = jsonw_new(stdout);
 386				if (!btf_wtr) {
 387					p_err("jsonw alloc failed");
 388					goto out_free;
 389				}
 390			d.jw = btf_wtr;
 391
 392				printed_header = true;
 393			}
 394
 395			printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
 396
 397			jsonw_reset(btf_wtr);
 398			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 399			if (err) {
 400				p_err("btf dump failed: %d", err);
 401				break;
 402			}
 403		}
 404		if (printed_header)
 405			jsonw_destroy(&btf_wtr);
 406	}
 407
 408out_free:
 409	btf__free(btf);
 410	free(value);
 411}
 412
 413static void print_prog_header_json(struct bpf_prog_info *info, int fd)
 414{
 415	const char *prog_type_str;
 416	char prog_name[MAX_PROG_FULL_NAME];
 417
 418	jsonw_uint_field(json_wtr, "id", info->id);
 419	prog_type_str = libbpf_bpf_prog_type_str(info->type);
 420
 421	if (prog_type_str)
 422		jsonw_string_field(json_wtr, "type", prog_type_str);
 423	else
 424		jsonw_uint_field(json_wtr, "type", info->type);
 425
 426	if (*info->name) {
 427		get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
 428		jsonw_string_field(json_wtr, "name", prog_name);
 429	}
 430
 431	jsonw_name(json_wtr, "tag");
 432	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
 433		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
 434		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 435
 436	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
 437	if (info->run_time_ns) {
 438		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
 439		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
 440	}
 441	if (info->recursion_misses)
 442		jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
 443}
 444
 445static void print_prog_json(struct bpf_prog_info *info, int fd)
 446{
 447	char *memlock;
 448
 449	jsonw_start_object(json_wtr);
 450	print_prog_header_json(info, fd);
 451	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
 452
 453	if (info->load_time) {
 454		char buf[32];
 455
 456		print_boot_time(info->load_time, buf, sizeof(buf));
 457
 458		/* Piggy back on load_time, since 0 uid is a valid one */
 459		jsonw_name(json_wtr, "loaded_at");
 460		jsonw_printf(json_wtr, "%s", buf);
 461		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
 462	}
 463
 464	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
 465
 466	if (info->jited_prog_len) {
 467		jsonw_bool_field(json_wtr, "jited", true);
 468		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
 469	} else {
 470		jsonw_bool_field(json_wtr, "jited", false);
 471	}
 472
 473	memlock = get_fdinfo(fd, "memlock");
 474	if (memlock)
 475		jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
 476	free(memlock);
 477
 478	if (info->nr_map_ids)
 479		show_prog_maps(fd, info->nr_map_ids);
 480
 481	if (info->btf_id)
 482		jsonw_int_field(json_wtr, "btf_id", info->btf_id);
 483
 484	if (!hashmap__empty(prog_table)) {
 485		struct hashmap_entry *entry;
 486
 487		jsonw_name(json_wtr, "pinned");
 488		jsonw_start_array(json_wtr);
 489		hashmap__for_each_key_entry(prog_table, entry, info->id)
 490			jsonw_string(json_wtr, entry->pvalue);
 491		jsonw_end_array(json_wtr);
 492	}
 493
 494	emit_obj_refs_json(refs_table, info->id, json_wtr);
 495
 496	show_prog_metadata(fd, info->nr_map_ids);
 497
 498	jsonw_end_object(json_wtr);
 499}
 500
 501static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
 502{
 503	const char *prog_type_str;
 504	char prog_name[MAX_PROG_FULL_NAME];
 505
 506	printf("%u: ", info->id);
 507	prog_type_str = libbpf_bpf_prog_type_str(info->type);
 508	if (prog_type_str)
 509		printf("%s  ", prog_type_str);
 510	else
 511		printf("type %u  ", info->type);
 512
 513	if (*info->name) {
 514		get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
 515		printf("name %s  ", prog_name);
 516	}
 517
 518	printf("tag ");
 519	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
 520	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
 521	printf("%s", info->gpl_compatible ? "  gpl" : "");
 522	if (info->run_time_ns)
 523		printf(" run_time_ns %lld run_cnt %lld",
 524		       info->run_time_ns, info->run_cnt);
 525	if (info->recursion_misses)
 526		printf(" recursion_misses %lld", info->recursion_misses);
 527	printf("\n");
 528}
 529
 530static void print_prog_plain(struct bpf_prog_info *info, int fd)
 531{
 532	char *memlock;
 533
 534	print_prog_header_plain(info, fd);
 535
 536	if (info->load_time) {
 537		char buf[32];
 538
 539		print_boot_time(info->load_time, buf, sizeof(buf));
 540
 541		/* Piggy back on load_time, since 0 uid is a valid one */
 542		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
 543	}
 544
 545	printf("\txlated %uB", info->xlated_prog_len);
 546
 547	if (info->jited_prog_len)
 548		printf("  jited %uB", info->jited_prog_len);
 549	else
 550		printf("  not jited");
 551
 552	memlock = get_fdinfo(fd, "memlock");
 553	if (memlock)
 554		printf("  memlock %sB", memlock);
 555	free(memlock);
 556
 557	if (info->nr_map_ids)
 558		show_prog_maps(fd, info->nr_map_ids);
 559
 560	if (!hashmap__empty(prog_table)) {
 561		struct hashmap_entry *entry;
 562
 563		hashmap__for_each_key_entry(prog_table, entry, info->id)
 564			printf("\n\tpinned %s", (char *)entry->pvalue);
 565	}
 566
 567	if (info->btf_id)
 568		printf("\n\tbtf_id %d", info->btf_id);
 569
 570	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
 571
 572	printf("\n");
 573
 574	show_prog_metadata(fd, info->nr_map_ids);
 575}
 576
 577static int show_prog(int fd)
 578{
 579	struct bpf_prog_info info = {};
 580	__u32 len = sizeof(info);
 581	int err;
 582
 583	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 584	if (err) {
 585		p_err("can't get prog info: %s", strerror(errno));
 586		return -1;
 587	}
 588
 589	if (json_output)
 590		print_prog_json(&info, fd);
 591	else
 592		print_prog_plain(&info, fd);
 593
 594	return 0;
 595}
 596
 597static int do_show_subset(int argc, char **argv)
 598{
 599	int *fds = NULL;
 600	int nb_fds, i;
 601	int err = -1;
 602
 603	fds = malloc(sizeof(int));
 604	if (!fds) {
 605		p_err("mem alloc failed");
 606		return -1;
 607	}
 608	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 609	if (nb_fds < 1)
 610		goto exit_free;
 611
 612	if (json_output && nb_fds > 1)
 613		jsonw_start_array(json_wtr);	/* root array */
 614	for (i = 0; i < nb_fds; i++) {
 615		err = show_prog(fds[i]);
 616		if (err) {
 617			for (; i < nb_fds; i++)
 618				close(fds[i]);
 619			break;
 620		}
 621		close(fds[i]);
 622	}
 623	if (json_output && nb_fds > 1)
 624		jsonw_end_array(json_wtr);	/* root array */
 625
 626exit_free:
 627	free(fds);
 628	return err;
 629}
 630
 631static int do_show(int argc, char **argv)
 632{
 633	__u32 id = 0;
 634	int err;
 635	int fd;
 636
 637	if (show_pinned) {
 638		prog_table = hashmap__new(hash_fn_for_key_as_id,
 639					  equal_fn_for_key_as_id, NULL);
 640		if (IS_ERR(prog_table)) {
 641			p_err("failed to create hashmap for pinned paths");
 642			return -1;
 643		}
 644		build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
 645	}
 646	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
 647
 648	if (argc == 2)
 649		return do_show_subset(argc, argv);
 650
 651	if (argc)
 652		return BAD_ARG();
 653
 654	if (json_output)
 655		jsonw_start_array(json_wtr);
 656	while (true) {
 657		err = bpf_prog_get_next_id(id, &id);
 658		if (err) {
 659			if (errno == ENOENT) {
 660				err = 0;
 661				break;
 662			}
 663			p_err("can't get next program: %s%s", strerror(errno),
 664			      errno == EINVAL ? " -- kernel too old?" : "");
 665			err = -1;
 666			break;
 667		}
 668
 669		fd = bpf_prog_get_fd_by_id(id);
 670		if (fd < 0) {
 671			if (errno == ENOENT)
 672				continue;
 673			p_err("can't get prog by id (%u): %s",
 674			      id, strerror(errno));
 675			err = -1;
 676			break;
 677		}
 678
 679		err = show_prog(fd);
 680		close(fd);
 681		if (err)
 682			break;
 683	}
 684
 685	if (json_output)
 686		jsonw_end_array(json_wtr);
 687
 688	delete_obj_refs_table(refs_table);
 689
 690	if (show_pinned)
 691		delete_pinned_obj_table(prog_table);
 692
 693	return err;
 694}
 695
 696static int
 697prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 698	  char *filepath, bool opcodes, bool visual, bool linum)
 699{
 700	struct bpf_prog_linfo *prog_linfo = NULL;
 701	const char *disasm_opt = NULL;
 702	struct dump_data dd = {};
 703	void *func_info = NULL;
 704	struct btf *btf = NULL;
 705	char func_sig[1024];
 706	unsigned char *buf;
 707	__u32 member_len;
 708	int fd, err = -1;
 709	ssize_t n;
 710
 711	if (mode == DUMP_JITED) {
 712		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
 713			p_info("no instructions returned");
 714			return -1;
 715		}
 716		buf = u64_to_ptr(info->jited_prog_insns);
 717		member_len = info->jited_prog_len;
 718	} else {	/* DUMP_XLATED */
 719		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 720			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 721			return -1;
 722		}
 723		buf = u64_to_ptr(info->xlated_prog_insns);
 724		member_len = info->xlated_prog_len;
 725	}
 726
 727	if (info->btf_id) {
 728		btf = btf__load_from_kernel_by_id(info->btf_id);
 729		if (!btf) {
 730			p_err("failed to get btf");
 731			return -1;
 732		}
 733	}
 734
 735	func_info = u64_to_ptr(info->func_info);
 736
 737	if (info->nr_line_info) {
 738		prog_linfo = bpf_prog_linfo__new(info);
 739		if (!prog_linfo)
 740			p_info("error in processing bpf_line_info, continuing without it.");
 741	}
 742
 743	if (filepath) {
 744		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 745		if (fd < 0) {
 746			p_err("can't open file %s: %s", filepath,
 747			      strerror(errno));
 748			goto exit_free;
 749		}
 750
 751		n = write(fd, buf, member_len);
 752		close(fd);
 753		if (n != (ssize_t)member_len) {
 754			p_err("error writing output file: %s",
 755			      n < 0 ? strerror(errno) : "short write");
 756			goto exit_free;
 757		}
 758
 759		if (json_output)
 760			jsonw_null(json_wtr);
 761	} else if (mode == DUMP_JITED) {
 762		const char *name = NULL;
 763
 764		if (info->ifindex) {
 765			name = ifindex_to_arch(info->ifindex, info->netns_dev,
 766					       info->netns_ino, &disasm_opt);
 767			if (!name)
 768				goto exit_free;
 769		}
 770
 771		if (info->nr_jited_func_lens && info->jited_func_lens) {
 772			struct kernel_sym *sym = NULL;
 773			struct bpf_func_info *record;
 774			char sym_name[SYM_MAX_NAME];
 775			unsigned char *img = buf;
 776			__u64 *ksyms = NULL;
 777			__u32 *lens;
 778			__u32 i;
 779			if (info->nr_jited_ksyms) {
 780				kernel_syms_load(&dd);
 781				ksyms = u64_to_ptr(info->jited_ksyms);
 782			}
 783
 784			if (json_output)
 785				jsonw_start_array(json_wtr);
 786
 787			lens = u64_to_ptr(info->jited_func_lens);
 788			for (i = 0; i < info->nr_jited_func_lens; i++) {
 789				if (ksyms) {
 790					sym = kernel_syms_search(&dd, ksyms[i]);
 791					if (sym)
 792						sprintf(sym_name, "%s", sym->name);
 793					else
 794						sprintf(sym_name, "0x%016llx", ksyms[i]);
 795				} else {
 796					strcpy(sym_name, "unknown");
 797				}
 798
 799				if (func_info) {
 800					record = func_info + i * info->func_info_rec_size;
 801					btf_dumper_type_only(btf, record->type_id,
 802							     func_sig,
 803							     sizeof(func_sig));
 804				}
 805
 806				if (json_output) {
 807					jsonw_start_object(json_wtr);
 808					if (func_info && func_sig[0] != '\0') {
 809						jsonw_name(json_wtr, "proto");
 810						jsonw_string(json_wtr, func_sig);
 811					}
 812					jsonw_name(json_wtr, "name");
 813					jsonw_string(json_wtr, sym_name);
 814					jsonw_name(json_wtr, "insns");
 815				} else {
 816					if (func_info && func_sig[0] != '\0')
 817						printf("%s:\n", func_sig);
 818					printf("%s:\n", sym_name);
 819				}
 820
 821				if (disasm_print_insn(img, lens[i], opcodes,
 822						      name, disasm_opt, btf,
 823						      prog_linfo, ksyms[i], i,
 824						      linum))
 825					goto exit_free;
 826
 827				img += lens[i];
 828
 829				if (json_output)
 830					jsonw_end_object(json_wtr);
 831				else
 832					printf("\n");
 833			}
 834
 835			if (json_output)
 836				jsonw_end_array(json_wtr);
 837		} else {
 838			if (disasm_print_insn(buf, member_len, opcodes, name,
 839					      disasm_opt, btf, NULL, 0, 0,
 840					      false))
 841				goto exit_free;
 842		}
 843	} else if (visual) {
 844		if (json_output)
 845			jsonw_null(json_wtr);
 846		else
 847			dump_xlated_cfg(buf, member_len);
 848	} else {
 849		kernel_syms_load(&dd);
 850		dd.nr_jited_ksyms = info->nr_jited_ksyms;
 851		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
 852		dd.btf = btf;
 853		dd.func_info = func_info;
 854		dd.finfo_rec_size = info->func_info_rec_size;
 855		dd.prog_linfo = prog_linfo;
 856
 857		if (json_output)
 858			dump_xlated_json(&dd, buf, member_len, opcodes,
 859					 linum);
 860		else
 861			dump_xlated_plain(&dd, buf, member_len, opcodes,
 862					  linum);
 863		kernel_syms_destroy(&dd);
 864	}
 865
 866	err = 0;
 867
 868exit_free:
 869	btf__free(btf);
 870	bpf_prog_linfo__free(prog_linfo);
 871	return err;
 872}
 873
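/*
 * Example invocations (program references are hypothetical):
 *   # bpftool prog dump xlated id 42 opcodes
 *   # bpftool prog dump jited name my_prog file /tmp/jited.bin
 */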
 874static int do_dump(int argc, char **argv)
 875{
 876	struct bpf_prog_info info;
 877	__u32 info_len = sizeof(info);
 878	size_t info_data_sz = 0;
 879	void *info_data = NULL;
 880	char *filepath = NULL;
 881	bool opcodes = false;
 882	bool visual = false;
 883	enum dump_mode mode;
 884	bool linum = false;
 885	int nb_fds, i = 0;
 886	int *fds = NULL;
 887	int err = -1;
 888
 889	if (is_prefix(*argv, "jited")) {
 890		if (disasm_init())
 891			return -1;
 892		mode = DUMP_JITED;
 893	} else if (is_prefix(*argv, "xlated")) {
 894		mode = DUMP_XLATED;
 895	} else {
 896		p_err("expected 'xlated' or 'jited', got: %s", *argv);
 897		return -1;
 898	}
 899	NEXT_ARG();
 900
 901	if (argc < 2)
 902		usage();
 903
 904	fds = malloc(sizeof(int));
 905	if (!fds) {
 906		p_err("mem alloc failed");
 907		return -1;
 908	}
 909	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 910	if (nb_fds < 1)
 911		goto exit_free;
 912
 913	if (is_prefix(*argv, "file")) {
 914		NEXT_ARG();
 915		if (!argc) {
 916			p_err("expected file path");
 917			goto exit_close;
 918		}
 919		if (nb_fds > 1) {
 920			p_err("several programs matched");
 921			goto exit_close;
 922		}
 923
 924		filepath = *argv;
 925		NEXT_ARG();
 926	} else if (is_prefix(*argv, "opcodes")) {
 927		opcodes = true;
 928		NEXT_ARG();
 929	} else if (is_prefix(*argv, "visual")) {
 930		if (nb_fds > 1) {
 931			p_err("several programs matched");
 932			goto exit_close;
 933		}
 934
 935		visual = true;
 936		NEXT_ARG();
 937	} else if (is_prefix(*argv, "linum")) {
 938		linum = true;
 939		NEXT_ARG();
 940	}
 941
 942	if (argc) {
 943		usage();
 944		goto exit_close;
 945	}
 946
 947	if (json_output && nb_fds > 1)
 948		jsonw_start_array(json_wtr);	/* root array */
 949	for (i = 0; i < nb_fds; i++) {
 950		memset(&info, 0, sizeof(info));
 951
 952		err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
 953		if (err) {
 954			p_err("can't get prog info: %s", strerror(errno));
 955			break;
 956		}
 957
 958		err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
 959		if (err) {
 960			p_err("can't grow prog info_data");
 961			break;
 962		}
 963
 964		err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
 965		if (err) {
 966			p_err("can't get prog info: %s", strerror(errno));
 967			break;
 968		}
 969
 970		if (json_output && nb_fds > 1) {
 971			jsonw_start_object(json_wtr);	/* prog object */
 972			print_prog_header_json(&info, fds[i]);
 973			jsonw_name(json_wtr, "insns");
 974		} else if (nb_fds > 1) {
 975			print_prog_header_plain(&info, fds[i]);
 976		}
 977
 978		err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
 979
 980		if (json_output && nb_fds > 1)
 981			jsonw_end_object(json_wtr);	/* prog object */
 982		else if (i != nb_fds - 1 && nb_fds > 1)
 983			printf("\n");
 984
 985		if (err)
 986			break;
 987		close(fds[i]);
 988	}
 989	if (json_output && nb_fds > 1)
 990		jsonw_end_array(json_wtr);	/* root array */
 991
 992exit_close:
 993	for (; i < nb_fds; i++)
 994		close(fds[i]);
 995exit_free:
 996	free(info_data);
 997	free(fds);
 998	return err;
 999}
1000
1001static int do_pin(int argc, char **argv)
1002{
1003	int err;
1004
1005	err = do_pin_any(argc, argv, prog_parse_fd);
1006	if (!err && json_output)
1007		jsonw_null(json_wtr);
1008	return err;
1009}
1010
1011struct map_replace {
1012	int idx;
1013	int fd;
1014	char *name;
1015};
1016
1017static int map_replace_compar(const void *p1, const void *p2)
1018{
1019	const struct map_replace *a = p1, *b = p2;
1020
1021	return a->idx - b->idx;
1022}
1023
1024static int parse_attach_detach_args(int argc, char **argv, int *progfd,
1025				    enum bpf_attach_type *attach_type,
1026				    int *mapfd)
1027{
1028	if (!REQ_ARGS(3))
1029		return -EINVAL;
1030
1031	*progfd = prog_parse_fd(&argc, &argv);
1032	if (*progfd < 0)
1033		return *progfd;
1034
1035	*attach_type = parse_attach_type(*argv);
1036	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
1037		p_err("invalid attach/detach type");
1038		return -EINVAL;
1039	}
1040
1041	if (*attach_type == BPF_FLOW_DISSECTOR) {
1042		*mapfd = 0;
1043		return 0;
1044	}
1045
1046	NEXT_ARG();
1047	if (!REQ_ARGS(2))
1048		return -EINVAL;
1049
1050	*mapfd = map_parse_fd(&argc, &argv);
1051	if (*mapfd < 0)
1052		return *mapfd;
1053
1054	return 0;
1055}
1056
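/*
 * Example (hypothetical ids): attach program 7 to sockmap/sockhash map 3
 * for the msg_verdict hook:
 *   # bpftool prog attach id 7 msg_verdict id 3
 */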
1057static int do_attach(int argc, char **argv)
1058{
1059	enum bpf_attach_type attach_type;
1060	int err, progfd;
1061	int mapfd;
1062
1063	err = parse_attach_detach_args(argc, argv,
1064				       &progfd, &attach_type, &mapfd);
1065	if (err)
1066		return err;
1067
1068	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
1069	if (err) {
1070		p_err("failed to attach prog to map");
1071		return -EINVAL;
1072	}
1073
1074	if (json_output)
1075		jsonw_null(json_wtr);
1076	return 0;
1077}
1078
1079static int do_detach(int argc, char **argv)
1080{
1081	enum bpf_attach_type attach_type;
1082	int err, progfd;
1083	int mapfd;
1084
1085	err = parse_attach_detach_args(argc, argv,
1086				       &progfd, &attach_type, &mapfd);
1087	if (err)
1088		return err;
1089
1090	err = bpf_prog_detach2(progfd, mapfd, attach_type);
1091	if (err) {
1092		p_err("failed to detach prog from map");
1093		return -EINVAL;
1094	}
1095
1096	if (json_output)
1097		jsonw_null(json_wtr);
1098	return 0;
1099}
1100
1101static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1102{
1103	if (file_data_in && file_ctx_in &&
1104	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1105		p_err("cannot use standard input for both data_in and ctx_in");
1106		return -1;
1107	}
1108
1109	return 0;
1110}
1111
1112static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1113{
1114	size_t block_size = 256;
1115	size_t buf_size = block_size;
1116	size_t nb_read = 0;
1117	void *tmp;
1118	FILE *f;
1119
1120	if (!fname) {
1121		*data_ptr = NULL;
1122		*size = 0;
1123		return 0;
1124	}
1125
1126	if (!strcmp(fname, "-"))
1127		f = stdin;
1128	else
1129		f = fopen(fname, "r");
1130	if (!f) {
1131		p_err("failed to open %s: %s", fname, strerror(errno));
1132		return -1;
1133	}
1134
1135	*data_ptr = malloc(block_size);
1136	if (!*data_ptr) {
1137		p_err("failed to allocate memory for data_in/ctx_in: %s",
1138		      strerror(errno));
1139		goto err_fclose;
1140	}
1141
1142	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1143		if (feof(f))
1144			break;
1145		if (ferror(f)) {
1146			p_err("failed to read data_in/ctx_in from %s: %s",
1147			      fname, strerror(errno));
1148			goto err_free;
1149		}
1150		if (nb_read > buf_size - block_size) {
1151			if (buf_size == UINT32_MAX) {
1152				p_err("data_in/ctx_in is too long (max: %d)",
1153				      UINT32_MAX);
1154				goto err_free;
1155			}
1156			/* No space for fread()-ing next chunk; realloc() */
1157			buf_size *= 2;
1158			tmp = realloc(*data_ptr, buf_size);
1159			if (!tmp) {
1160				p_err("failed to reallocate data_in/ctx_in: %s",
1161				      strerror(errno));
1162				goto err_free;
1163			}
1164			*data_ptr = tmp;
1165		}
1166	}
1167	if (f != stdin)
1168		fclose(f);
1169
1170	*size = nb_read;
1171	return 0;
1172
1173err_free:
1174	free(*data_ptr);
1175	*data_ptr = NULL;
1176err_fclose:
1177	if (f != stdin)
1178		fclose(f);
1179	return -1;
1180}
1181
1182static void hex_print(void *data, unsigned int size, FILE *f)
1183{
1184	size_t i, j;
1185	char c;
1186
1187	for (i = 0; i < size; i += 16) {
1188		/* Row offset */
1189		fprintf(f, "%07zx\t", i);
1190
1191		/* Hexadecimal values */
1192		for (j = i; j < i + 16 && j < size; j++)
1193			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1194				j % 2 ? " " : "");
1195		for (; j < i + 16; j++)
1196			fprintf(f, "  %s", j % 2 ? " " : "");
1197
1198		/* ASCII values (if relevant), '.' otherwise */
1199		fprintf(f, "| ");
1200		for (j = i; j < i + 16 && j < size; j++) {
1201			c = *(char *)(data + j);
1202			if (c < ' ' || c > '~')
1203				c = '.';
1204			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1205		}
1206
1207		fprintf(f, "\n");
1208	}
1209}
1210
1211static int
1212print_run_output(void *data, unsigned int size, const char *fname,
1213		 const char *json_key)
1214{
1215	size_t nb_written;
1216	FILE *f;
1217
1218	if (!fname)
1219		return 0;
1220
1221	if (!strcmp(fname, "-")) {
1222		f = stdout;
1223		if (json_output) {
1224			jsonw_name(json_wtr, json_key);
1225			print_data_json(data, size);
1226		} else {
1227			hex_print(data, size, f);
1228		}
1229		return 0;
1230	}
1231
1232	f = fopen(fname, "w");
1233	if (!f) {
1234		p_err("failed to open %s: %s", fname, strerror(errno));
1235		return -1;
1236	}
1237
1238	nb_written = fwrite(data, 1, size, f);
1239	fclose(f);
1240	if (nb_written != size) {
1241		p_err("failed to write output data/ctx: %s", strerror(errno));
1242		return -1;
1243	}
1244
1245	return 0;
1246}
1247
1248static int alloc_run_data(void **data_ptr, unsigned int size_out)
1249{
1250	*data_ptr = calloc(size_out, 1);
1251	if (!*data_ptr) {
1252		p_err("failed to allocate memory for output data/ctx: %s",
1253		      strerror(errno));
1254		return -1;
1255	}
1256
1257	return 0;
1258}
1259
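/*
 * Example (hypothetical paths): run the pinned program once on the packet
 * stored in in.bin and write the resulting packet to out.bin:
 *   # bpftool prog run pinned /sys/fs/bpf/prog data_in in.bin data_out out.bin
 */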
1260static int do_run(int argc, char **argv)
1261{
1262	char *data_fname_in = NULL, *data_fname_out = NULL;
1263	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
1264	const unsigned int default_size = SZ_32K;
1265	void *data_in = NULL, *data_out = NULL;
1266	void *ctx_in = NULL, *ctx_out = NULL;
1267	unsigned int repeat = 1;
1268	int fd, err;
1269	LIBBPF_OPTS(bpf_test_run_opts, test_attr);
1270
1271	if (!REQ_ARGS(4))
1272		return -1;
1273
1274	fd = prog_parse_fd(&argc, &argv);
1275	if (fd < 0)
1276		return -1;
1277
1278	while (argc) {
1279		if (detect_common_prefix(*argv, "data_in", "data_out",
1280					 "data_size_out", NULL))
1281			return -1;
1282		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1283					 "ctx_size_out", NULL))
1284			return -1;
1285
1286		if (is_prefix(*argv, "data_in")) {
1287			NEXT_ARG();
1288			if (!REQ_ARGS(1))
1289				return -1;
1290
1291			data_fname_in = GET_ARG();
1292			if (check_single_stdin(data_fname_in, ctx_fname_in))
1293				return -1;
1294		} else if (is_prefix(*argv, "data_out")) {
1295			NEXT_ARG();
1296			if (!REQ_ARGS(1))
1297				return -1;
1298
1299			data_fname_out = GET_ARG();
1300		} else if (is_prefix(*argv, "data_size_out")) {
1301			char *endptr;
1302
1303			NEXT_ARG();
1304			if (!REQ_ARGS(1))
1305				return -1;
1306
1307			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1308			if (*endptr) {
1309				p_err("can't parse %s as output data size",
1310				      *argv);
1311				return -1;
1312			}
1313			NEXT_ARG();
1314		} else if (is_prefix(*argv, "ctx_in")) {
1315			NEXT_ARG();
1316			if (!REQ_ARGS(1))
1317				return -1;
1318
1319			ctx_fname_in = GET_ARG();
1320			if (check_single_stdin(data_fname_in, ctx_fname_in))
1321				return -1;
1322		} else if (is_prefix(*argv, "ctx_out")) {
1323			NEXT_ARG();
1324			if (!REQ_ARGS(1))
1325				return -1;
1326
1327			ctx_fname_out = GET_ARG();
1328		} else if (is_prefix(*argv, "ctx_size_out")) {
1329			char *endptr;
1330
1331			NEXT_ARG();
1332			if (!REQ_ARGS(1))
1333				return -1;
1334
1335			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1336			if (*endptr) {
1337				p_err("can't parse %s as output context size",
1338				      *argv);
1339				return -1;
1340			}
1341			NEXT_ARG();
1342		} else if (is_prefix(*argv, "repeat")) {
1343			char *endptr;
1344
1345			NEXT_ARG();
1346			if (!REQ_ARGS(1))
1347				return -1;
1348
1349			repeat = strtoul(*argv, &endptr, 0);
1350			if (*endptr) {
1351				p_err("can't parse %s as repeat number",
1352				      *argv);
1353				return -1;
1354			}
1355			NEXT_ARG();
1356		} else {
1357			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1358			      *argv);
1359			return -1;
1360		}
1361	}
1362
1363	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1364	if (err)
1365		return -1;
1366
1367	if (data_in) {
1368		if (!test_attr.data_size_out)
1369			test_attr.data_size_out = default_size;
1370		err = alloc_run_data(&data_out, test_attr.data_size_out);
1371		if (err)
1372			goto free_data_in;
1373	}
1374
1375	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1376	if (err)
1377		goto free_data_out;
1378
1379	if (ctx_in) {
1380		if (!test_attr.ctx_size_out)
1381			test_attr.ctx_size_out = default_size;
1382		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1383		if (err)
1384			goto free_ctx_in;
1385	}
1386
1387	test_attr.repeat	= repeat;
1388	test_attr.data_in	= data_in;
1389	test_attr.data_out	= data_out;
1390	test_attr.ctx_in	= ctx_in;
1391	test_attr.ctx_out	= ctx_out;
1392
1393	err = bpf_prog_test_run_opts(fd, &test_attr);
1394	if (err) {
1395		p_err("failed to run program: %s", strerror(errno));
1396		goto free_ctx_out;
1397	}
1398
1399	err = 0;
1400
1401	if (json_output)
1402		jsonw_start_object(json_wtr);	/* root */
1403
1404	/* Do not exit on errors occurring when printing output data/context;
1405	 * we still want to print the return value and duration of the program run.
1406	 */
1407	if (test_attr.data_size_out)
1408		err += print_run_output(test_attr.data_out,
1409					test_attr.data_size_out,
1410					data_fname_out, "data_out");
1411	if (test_attr.ctx_size_out)
1412		err += print_run_output(test_attr.ctx_out,
1413					test_attr.ctx_size_out,
1414					ctx_fname_out, "ctx_out");
1415
1416	if (json_output) {
1417		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1418		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1419		jsonw_end_object(json_wtr);	/* root */
1420	} else {
1421		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1422			test_attr.retval,
1423			repeat > 1 ? " (average)" : "", test_attr.duration);
1424	}
1425
1426free_ctx_out:
1427	free(ctx_out);
1428free_ctx_in:
1429	free(ctx_in);
1430free_data_out:
1431	free(data_out);
1432free_data_in:
1433	free(data_in);
1434
1435	return err;
1436}
1437
1438static int
1439get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1440		      enum bpf_attach_type *expected_attach_type)
1441{
1442	libbpf_print_fn_t print_backup;
1443	int ret;
1444
1445	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1446	if (!ret)
1447		return ret;
1448
1449	/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1450	print_backup = libbpf_set_print(print_all_levels);
1451	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1452	libbpf_set_print(print_backup);
1453
1454	return ret;
1455}
1456
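/*
 * Try bpf_program__attach() first; for program types that libbpf cannot
 * auto-attach, fall back to pinning the program FD itself at "path". On
 * successful attach, pin the resulting link instead.
 */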
1457static int
1458auto_attach_program(struct bpf_program *prog, const char *path)
1459{
1460	struct bpf_link *link;
1461	int err;
1462
1463	link = bpf_program__attach(prog);
1464	if (!link) {
1465		p_info("Program %s does not support autoattach, falling back to pinning",
1466		       bpf_program__name(prog));
1467		return bpf_obj_pin(bpf_program__fd(prog), path);
1468	}
1469
1470	err = bpf_link__pin(link, path);
1471	bpf_link__destroy(link);
1472	return err;
1473}
1474
1475static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
1476{
1477	int len;
1478
1479	len = snprintf(buf, buf_sz, "%s/%s", path, name);
1480	if (len < 0)
1481		return -EINVAL;
1482	if ((size_t)len >= buf_sz)
1483		return -ENAMETOOLONG;
1484
1485	return 0;
1486}
1487
1488static int
1489auto_attach_programs(struct bpf_object *obj, const char *path)
1490{
1491	struct bpf_program *prog;
1492	char buf[PATH_MAX];
1493	int err;
1494
1495	bpf_object__for_each_program(prog, obj) {
1496		err = pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog));
1497		if (err)
1498			goto err_unpin_programs;
1499
1500		err = auto_attach_program(prog, buf);
1501		if (err)
1502			goto err_unpin_programs;
1503	}
1504
1505	return 0;
1506
1507err_unpin_programs:
1508	while ((prog = bpf_object__prev_program(obj, prog))) {
1509		if (pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog)))
1510			continue;
1511
1512		bpf_program__unpin(prog, buf);
1513	}
1514
1515	return err;
1516}
1517
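/*
 * Example invocations (object and bpffs paths are hypothetical):
 *   # bpftool prog load sample.bpf.o /sys/fs/bpf/sample
 *   # bpftool prog loadall sample.bpf.o /sys/fs/bpf/samples pinmaps /sys/fs/bpf/maps
 * "load" pins only the first program found in the object, "loadall" pins
 * them all under the given directory.
 */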
1518static int load_with_options(int argc, char **argv, bool first_prog_only)
1519{
1520	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1521	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1522		.relaxed_maps = relaxed_maps,
1523	);
1524	enum bpf_attach_type expected_attach_type;
1525	struct map_replace *map_replace = NULL;
1526	struct bpf_program *prog = NULL, *pos;
1527	unsigned int old_map_fds = 0;
1528	const char *pinmaps = NULL;
1529	bool auto_attach = false;
1530	struct bpf_object *obj;
1531	struct bpf_map *map;
1532	const char *pinfile;
1533	unsigned int i, j;
1534	__u32 ifindex = 0;
1535	const char *file;
1536	int idx, err;
1537
1538
1539	if (!REQ_ARGS(2))
1540		return -1;
1541	file = GET_ARG();
1542	pinfile = GET_ARG();
1543
1544	while (argc) {
1545		if (is_prefix(*argv, "type")) {
1546			NEXT_ARG();
1547
1548			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1549				p_err("program type already specified");
1550				goto err_free_reuse_maps;
1551			}
1552			if (!REQ_ARGS(1))
1553				goto err_free_reuse_maps;
1554
1555			err = libbpf_prog_type_by_name(*argv, &common_prog_type,
1556						       &expected_attach_type);
1557			if (err < 0) {
1558				/* Put a '/' at the end of type to appease libbpf */
1559				char *type = malloc(strlen(*argv) + 2);
1560
1561				if (!type) {
1562					p_err("mem alloc failed");
1563					goto err_free_reuse_maps;
1564				}
1565				*type = 0;
1566				strcat(type, *argv);
1567				strcat(type, "/");
1568
1569				err = get_prog_type_by_name(type, &common_prog_type,
1570							    &expected_attach_type);
1571				free(type);
1572				if (err < 0)
1573					goto err_free_reuse_maps;
1574			}
1575
1576			NEXT_ARG();
1577		} else if (is_prefix(*argv, "map")) {
1578			void *new_map_replace;
1579			char *endptr, *name;
1580			int fd;
1581
1582			NEXT_ARG();
1583
1584			if (!REQ_ARGS(4))
1585				goto err_free_reuse_maps;
1586
1587			if (is_prefix(*argv, "idx")) {
1588				NEXT_ARG();
1589
1590				idx = strtoul(*argv, &endptr, 0);
1591				if (*endptr) {
1592					p_err("can't parse %s as IDX", *argv);
1593					goto err_free_reuse_maps;
1594				}
1595				name = NULL;
1596			} else if (is_prefix(*argv, "name")) {
1597				NEXT_ARG();
1598
1599				name = *argv;
1600				idx = -1;
1601			} else {
1602				p_err("expected 'idx' or 'name', got: '%s'?",
1603				      *argv);
1604				goto err_free_reuse_maps;
1605			}
1606			NEXT_ARG();
1607
1608			fd = map_parse_fd(&argc, &argv);
1609			if (fd < 0)
1610				goto err_free_reuse_maps;
1611
1612			new_map_replace = libbpf_reallocarray(map_replace,
1613							      old_map_fds + 1,
1614							      sizeof(*map_replace));
1615			if (!new_map_replace) {
1616				p_err("mem alloc failed");
1617				goto err_free_reuse_maps;
1618			}
1619			map_replace = new_map_replace;
1620
1621			map_replace[old_map_fds].idx = idx;
1622			map_replace[old_map_fds].name = name;
1623			map_replace[old_map_fds].fd = fd;
1624			old_map_fds++;
1625		} else if (is_prefix(*argv, "dev")) {
1626			NEXT_ARG();
1627
1628			if (ifindex) {
1629				p_err("offload device already specified");
1630				goto err_free_reuse_maps;
1631			}
1632			if (!REQ_ARGS(1))
1633				goto err_free_reuse_maps;
1634
1635			ifindex = if_nametoindex(*argv);
1636			if (!ifindex) {
1637				p_err("unrecognized netdevice '%s': %s",
1638				      *argv, strerror(errno));
1639				goto err_free_reuse_maps;
1640			}
1641			NEXT_ARG();
1642		} else if (is_prefix(*argv, "pinmaps")) {
1643			NEXT_ARG();
1644
1645			if (!REQ_ARGS(1))
1646				goto err_free_reuse_maps;
1647
1648			pinmaps = GET_ARG();
1649		} else if (is_prefix(*argv, "autoattach")) {
1650			auto_attach = true;
1651			NEXT_ARG();
1652		} else {
1653			p_err("expected no more arguments, 'type', 'map', 'dev', 'pinmaps' or 'autoattach', got: '%s'?",
1654			      *argv);
1655			goto err_free_reuse_maps;
1656		}
1657	}
1658
1659	set_max_rlimit();
1660
1661	if (verifier_logs)
1662		/* log_level1 + log_level2 + stats, but not stable UAPI */
1663		open_opts.kernel_log_level = 1 + 2 + 4;
1664
1665	obj = bpf_object__open_file(file, &open_opts);
1666	if (!obj) {
1667		p_err("failed to open object file");
1668		goto err_free_reuse_maps;
1669	}
1670
1671	bpf_object__for_each_program(pos, obj) {
1672		enum bpf_prog_type prog_type = common_prog_type;
1673
1674		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1675			const char *sec_name = bpf_program__section_name(pos);
1676
1677			err = get_prog_type_by_name(sec_name, &prog_type,
1678						    &expected_attach_type);
1679			if (err < 0)
1680				goto err_close_obj;
1681		}
1682
1683		bpf_program__set_ifindex(pos, ifindex);
1684		bpf_program__set_type(pos, prog_type);
1685		bpf_program__set_expected_attach_type(pos, expected_attach_type);
1686	}
1687
1688	qsort(map_replace, old_map_fds, sizeof(*map_replace),
1689	      map_replace_compar);
1690
1691	/* After the sort, maps referenced by name come first in the list because
1692	 * they have idx == -1. Resolve them to indices now.
1693	 */
1694	j = 0;
1695	while (j < old_map_fds && map_replace[j].name) {
1696		i = 0;
1697		bpf_object__for_each_map(map, obj) {
1698			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1699				map_replace[j].idx = i;
1700				break;
1701			}
1702			i++;
1703		}
1704		if (map_replace[j].idx == -1) {
1705			p_err("unable to find map '%s'", map_replace[j].name);
1706			goto err_close_obj;
1707		}
1708		j++;
1709	}
1710	/* Resort if any names were resolved */
1711	if (j)
1712		qsort(map_replace, old_map_fds, sizeof(*map_replace),
1713		      map_replace_compar);
1714
1715	/* Set ifindex and name reuse */
1716	j = 0;
1717	idx = 0;
1718	bpf_object__for_each_map(map, obj) {
1719		if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1720			bpf_map__set_ifindex(map, ifindex);
1721
1722		if (j < old_map_fds && idx == map_replace[j].idx) {
1723			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1724			if (err) {
1725				p_err("unable to set up map reuse: %d", err);
1726				goto err_close_obj;
1727			}
1728
1729			/* Next reuse wants to apply to the same map */
1730			if (j < old_map_fds && map_replace[j].idx == idx) {
1731				p_err("replacement for map idx %d specified more than once",
1732				      idx);
1733				goto err_close_obj;
1734			}
1735		}
1736
1737		idx++;
1738	}
1739	if (j < old_map_fds) {
1740		p_err("map idx '%d' not used", map_replace[j].idx);
1741		goto err_close_obj;
1742	}
1743
1744	err = bpf_object__load(obj);
1745	if (err) {
1746		p_err("failed to load object file");
1747		goto err_close_obj;
1748	}
1749
1750	err = mount_bpffs_for_pin(pinfile);
1751	if (err)
1752		goto err_close_obj;
1753
1754	if (first_prog_only) {
1755		prog = bpf_object__next_program(obj, NULL);
1756		if (!prog) {
1757			p_err("object file doesn't contain any bpf program");
1758			goto err_close_obj;
1759		}
1760
1761		if (auto_attach)
1762			err = auto_attach_program(prog, pinfile);
1763		else
1764			err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1765		if (err) {
1766			p_err("failed to pin program %s",
1767			      bpf_program__section_name(prog));
1768			goto err_close_obj;
1769		}
1770	} else {
1771		if (auto_attach)
1772			err = auto_attach_programs(obj, pinfile);
1773		else
1774			err = bpf_object__pin_programs(obj, pinfile);
1775		if (err) {
1776			p_err("failed to pin all programs");
1777			goto err_close_obj;
1778		}
1779	}
1780
1781	if (pinmaps) {
1782		err = bpf_object__pin_maps(obj, pinmaps);
1783		if (err) {
1784			p_err("failed to pin all maps");
1785			goto err_unpin;
1786		}
1787	}
1788
1789	if (json_output)
1790		jsonw_null(json_wtr);
1791
1792	bpf_object__close(obj);
1793	for (i = 0; i < old_map_fds; i++)
1794		close(map_replace[i].fd);
1795	free(map_replace);
1796
1797	return 0;
1798
1799err_unpin:
1800	if (first_prog_only)
1801		unlink(pinfile);
1802	else
1803		bpf_object__unpin_programs(obj, pinfile);
1804err_close_obj:
1805	bpf_object__close(obj);
1806err_free_reuse_maps:
1807	for (i = 0; i < old_map_fds; i++)
1808		close(map_replace[i].fd);
1809	free(map_replace);
1810	return -1;
1811}
1812
1813static int count_open_fds(void)
1814{
1815	DIR *dp = opendir("/proc/self/fd");
1816	struct dirent *de;
1817	int cnt = -3;
1818
1819	if (!dp)
1820		return -1;
1821
1822	while ((de = readdir(dp)))
1823		cnt++;
1824
1825	closedir(dp);
1826	return cnt;
1827}
1828
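/*
 * Execute the "light skeleton" loader produced by bpf_object__gen_loader():
 * reserve a bpf_loader_ctx with room for up to 64 map/program descriptors,
 * optionally request a verifier log, run the generated loader program with
 * bpf_load_and_run(), and compare the number of open FDs before and after
 * to report descriptors leaked on failure.
 */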
1829static int try_loader(struct gen_loader_opts *gen)
1830{
1831	struct bpf_load_and_run_opts opts = {};
1832	struct bpf_loader_ctx *ctx;
1833	int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
1834					     sizeof(struct bpf_prog_desc));
1835	int log_buf_sz = (1u << 24) - 1;
1836	int err, fds_before, fd_delta;
1837	char *log_buf = NULL;
1838
1839	ctx = alloca(ctx_sz);
1840	memset(ctx, 0, ctx_sz);
1841	ctx->sz = ctx_sz;
1842	if (verifier_logs) {
1843		ctx->log_level = 1 + 2 + 4;
1844		ctx->log_size = log_buf_sz;
1845		log_buf = malloc(log_buf_sz);
1846		if (!log_buf)
1847			return -ENOMEM;
1848		ctx->log_buf = (long) log_buf;
1849	}
1850	opts.ctx = ctx;
1851	opts.data = gen->data;
1852	opts.data_sz = gen->data_sz;
1853	opts.insns = gen->insns;
1854	opts.insns_sz = gen->insns_sz;
1855	fds_before = count_open_fds();
1856	err = bpf_load_and_run(&opts);
1857	fd_delta = count_open_fds() - fds_before;
1858	if (err < 0 || verifier_logs) {
1859		fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf ? log_buf : "");
1860		if (fd_delta && err < 0)
1861			fprintf(stderr, "loader prog leaked %d FDs\n",
1862				fd_delta);
1863	}
1864	free(log_buf);
1865	return err;
1866}
1867
1868static int do_loader(int argc, char **argv)
1869{
1870	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
1871	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
1872	struct bpf_object *obj;
1873	const char *file;
1874	int err = 0;
1875
1876	if (!REQ_ARGS(1))
1877		return -1;
1878	file = GET_ARG();
1879
1880	if (verifier_logs)
1881		/* log_level1 + log_level2 + stats, but not stable UAPI */
1882		open_opts.kernel_log_level = 1 + 2 + 4;
1883
1884	obj = bpf_object__open_file(file, &open_opts);
1885	if (!obj) {
1886		p_err("failed to open object file");
1887		goto err_close_obj;
1888	}
1889
1890	err = bpf_object__gen_loader(obj, &gen);
1891	if (err)
1892		goto err_close_obj;
1893
1894	err = bpf_object__load(obj);
1895	if (err) {
1896		p_err("failed to load object file");
1897		goto err_close_obj;
1898	}
1899
1900	if (verifier_logs) {
1901		struct dump_data dd = {};
1902
1903		kernel_syms_load(&dd);
1904		dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
1905		kernel_syms_destroy(&dd);
1906	}
1907	err = try_loader(&gen);
1908err_close_obj:
1909	bpf_object__close(obj);
1910	return err;
1911}
1912
1913static int do_load(int argc, char **argv)
1914{
1915	if (use_loader)
1916		return do_loader(argc, argv);
1917	return load_with_options(argc, argv, true);
1918}
1919
1920static int do_loadall(int argc, char **argv)
1921{
1922	return load_with_options(argc, argv, false);
1923}
1924
1925#ifdef BPFTOOL_WITHOUT_SKELETONS
1926
1927static int do_profile(int argc, char **argv)
1928{
1929	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1930	return 0;
1931}
1932
1933#else /* BPFTOOL_WITHOUT_SKELETONS */
1934
1935#include "profiler.skel.h"
1936
1937struct profile_metric {
1938	const char *name;
1939	struct bpf_perf_event_value val;
1940	struct perf_event_attr attr;
1941	bool selected;
1942
1943	/* calculate ratios like instructions per cycle */
1944	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1945	const char *ratio_desc;
1946	const float ratio_mul;
1947} metrics[] = {
1948	{
1949		.name = "cycles",
1950		.attr = {
1951			.type = PERF_TYPE_HARDWARE,
1952			.config = PERF_COUNT_HW_CPU_CYCLES,
1953			.exclude_user = 1,
1954		},
1955	},
1956	{
1957		.name = "instructions",
1958		.attr = {
1959			.type = PERF_TYPE_HARDWARE,
1960			.config = PERF_COUNT_HW_INSTRUCTIONS,
1961			.exclude_user = 1,
1962		},
1963		.ratio_metric = 1,
1964		.ratio_desc = "insns per cycle",
1965		.ratio_mul = 1.0,
1966	},
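	/* for PERF_TYPE_HW_CACHE events, config is
	 * (cache id) | (op id << 8) | (result id << 16), see perf_event_open(2)
	 */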
1967	{
1968		.name = "l1d_loads",
1969		.attr = {
1970			.type = PERF_TYPE_HW_CACHE,
1971			.config =
1972				PERF_COUNT_HW_CACHE_L1D |
1973				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1974				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
1975			.exclude_user = 1,
1976		},
1977	},
1978	{
1979		.name = "llc_misses",
1980		.attr = {
1981			.type = PERF_TYPE_HW_CACHE,
1982			.config =
1983				PERF_COUNT_HW_CACHE_LL |
1984				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1985				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1986			.exclude_user = 1
1987		},
1988		.ratio_metric = 2,
1989		.ratio_desc = "LLC misses per million insns",
1990		.ratio_mul = 1e6,
1991	},
1992	{
1993		.name = "itlb_misses",
1994		.attr = {
1995			.type = PERF_TYPE_HW_CACHE,
1996			.config =
1997				PERF_COUNT_HW_CACHE_ITLB |
1998				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1999				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2000			.exclude_user = 1
2001		},
2002		.ratio_metric = 2,
2003		.ratio_desc = "itlb misses per million insns",
2004		.ratio_mul = 1e6,
2005	},
2006	{
2007		.name = "dtlb_misses",
2008		.attr = {
2009			.type = PERF_TYPE_HW_CACHE,
2010			.config =
2011				PERF_COUNT_HW_CACHE_DTLB |
2012				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2013				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2014			.exclude_user = 1
2015		},
2016		.ratio_metric = 2,
2017		.ratio_desc = "dtlb misses per million insns",
2018		.ratio_mul = 1e6,
2019	},
2020};
2021
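/* number of target program runs observed while profiling, summed across
 * CPUs and reported as run_cnt
 */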
2022static __u64 profile_total_count;
2023
2024#define MAX_NUM_PROFILE_METRICS 4
2025
2026static int profile_parse_metrics(int argc, char **argv)
2027{
2028	unsigned int metric_cnt;
2029	int selected_cnt = 0;
2030	unsigned int i;
2031
2032	metric_cnt = ARRAY_SIZE(metrics);
2033
2034	while (argc > 0) {
2035		for (i = 0; i < metric_cnt; i++) {
2036			if (is_prefix(argv[0], metrics[i].name)) {
2037				if (!metrics[i].selected)
2038					selected_cnt++;
2039				metrics[i].selected = true;
2040				break;
2041			}
2042		}
2043		if (i == metric_cnt) {
2044			p_err("unknown metric %s", argv[0]);
2045			return -1;
2046		}
2047		NEXT_ARG();
2048	}
2049	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
2050		p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
2051		      selected_cnt, MAX_NUM_PROFILE_METRICS);
2052		return -1;
2053	}
2054	return selected_cnt;
2055}
2056
2057static void profile_read_values(struct profiler_bpf *obj)
2058{
2059	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
2060	int reading_map_fd, count_map_fd;
2061	__u64 counts[num_cpu];
2062	__u32 key = 0;
2063	int err;
2064
2065	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
2066	count_map_fd = bpf_map__fd(obj->maps.counts);
2067	if (reading_map_fd < 0 || count_map_fd < 0) {
2068		p_err("failed to get fd for map");
2069		return;
2070	}
2071
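	/* the counts map holds a single per-CPU entry with the number of
	 * target program runs; sum it across CPUs for run_cnt
	 */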
2072	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
2073	if (err) {
2074		p_err("failed to read count_map: %s", strerror(errno));
2075		return;
2076	}
2077
2078	profile_total_count = 0;
2079	for (cpu = 0; cpu < num_cpu; cpu++)
2080		profile_total_count += counts[cpu];
2081
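	/* accum_readings is indexed by the position among *selected* metrics,
	 * so the key only advances for metrics that were enabled
	 */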
2082	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2083		struct bpf_perf_event_value values[num_cpu];
2084
2085		if (!metrics[m].selected)
2086			continue;
2087
2088		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
2089		if (err) {
2090			p_err("failed to read reading_map: %s",
2091			      strerror(errno));
2092			return;
2093		}
2094		for (cpu = 0; cpu < num_cpu; cpu++) {
2095			metrics[m].val.counter += values[cpu].counter;
2096			metrics[m].val.enabled += values[cpu].enabled;
2097			metrics[m].val.running += values[cpu].running;
2098		}
2099		key++;
2100	}
2101}
2102
2103static void profile_print_readings_json(void)
2104{
2105	__u32 m;
2106
2107	jsonw_start_array(json_wtr);
2108	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2109		if (!metrics[m].selected)
2110			continue;
2111		jsonw_start_object(json_wtr);
2112		jsonw_string_field(json_wtr, "metric", metrics[m].name);
2113		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
2114		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
2115		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
2116		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
2117
2118		jsonw_end_object(json_wtr);
2119	}
2120	jsonw_end_array(json_wtr);
2121}
2122
2123static void profile_print_readings_plain(void)
2124{
2125	__u32 m;
2126
2127	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
2128	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2129		struct bpf_perf_event_value *val = &metrics[m].val;
2130		int r;
2131
2132		if (!metrics[m].selected)
2133			continue;
2134		printf("%18llu %-20s", val->counter, metrics[m].name);
2135
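		/* ratio_metric is a 1-based index of the denominator metric
		 * (cycles or instructions); 0 means there is no ratio to show
		 */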
2136		r = metrics[m].ratio_metric - 1;
2137		if (r >= 0 && metrics[r].selected &&
2138		    metrics[r].val.counter > 0) {
2139			printf("# %8.2f %-30s",
2140			       val->counter * metrics[m].ratio_mul /
2141			       metrics[r].val.counter,
2142			       metrics[m].ratio_desc);
2143		} else {
2144			printf("%-41s", "");
2145		}
2146
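		/* enabled > running means the event was multiplexed; show the
		 * percentage of time it was actually counting
		 */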
2147		if (val->enabled > val->running)
2148			printf("(%4.2f%%)",
2149			       val->running * 100.0 / val->enabled);
2150		printf("\n");
2151	}
2152}
2153
2154static void profile_print_readings(void)
2155{
2156	if (json_output)
2157		profile_print_readings_json();
2158	else
2159		profile_print_readings_plain();
2160}
2161
2162static char *profile_target_name(int tgt_fd)
2163{
2164	struct bpf_func_info func_info;
2165	struct bpf_prog_info info = {};
2166	__u32 info_len = sizeof(info);
2167	const struct btf_type *t;
2168	__u32 func_info_rec_size;
2169	struct btf *btf = NULL;
2170	char *name = NULL;
2171	int err;
2172
2173	err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
2174	if (err) {
2175		p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd);
2176		goto out;
2177	}
2178
2179	if (info.btf_id == 0) {
2180		p_err("prog FD %d doesn't have valid btf", tgt_fd);
2181		goto out;
2182	}
2183
2184	func_info_rec_size = info.func_info_rec_size;
2185	if (info.nr_func_info == 0) {
2186		p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd);
2187		goto out;
2188	}
2189
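	/* query again, this time pulling in only the first func_info record;
	 * its BTF type id resolves to the program's function name
	 */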
2190	memset(&info, 0, sizeof(info));
2191	info.nr_func_info = 1;
2192	info.func_info_rec_size = func_info_rec_size;
2193	info.func_info = ptr_to_u64(&func_info);
2194
2195	err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
2196	if (err) {
2197		p_err("failed to get func_info for prog FD %d", tgt_fd);
2198		goto out;
2199	}
2200
2201	btf = btf__load_from_kernel_by_id(info.btf_id);
2202	if (!btf) {
2203		p_err("failed to load btf for prog FD %d", tgt_fd);
2204		goto out;
2205	}
2206
2207	t = btf__type_by_id(btf, func_info.type_id);
2208	if (!t) {
2209		p_err("btf %d doesn't have type %d",
2210		      info.btf_id, func_info.type_id);
2211		goto out;
2212	}
2213	name = strdup(btf__name_by_offset(btf, t->name_off));
2214out:
2215	btf__free(btf);
2216	return name;
2217}
2218
2219static struct profiler_bpf *profile_obj;
2220static int profile_tgt_fd = -1;
2221static char *profile_tgt_name;
2222static int *profile_perf_events;
2223static int profile_perf_event_cnt;
2224
2225static void profile_close_perf_events(struct profiler_bpf *obj)
2226{
2227	int i;
2228
2229	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
2230		close(profile_perf_events[i]);
2231
2232	free(profile_perf_events);
2233	profile_perf_event_cnt = 0;
2234}
2235
2236static int profile_open_perf_events(struct profiler_bpf *obj)
2237{
2238	unsigned int cpu, m;
2239	int map_fd, pmu_fd;
2240
2241	profile_perf_events = calloc(obj->rodata->num_cpu * obj->rodata->num_metric,
2242				     sizeof(int));
2243	if (!profile_perf_events) {
2244		p_err("failed to allocate memory for perf_event array: %s",
2245		      strerror(errno));
2246		return -1;
2247	}
2248	map_fd = bpf_map__fd(obj->maps.events);
2249	if (map_fd < 0) {
2250		p_err("failed to get fd for events map");
2251		return -1;
2252	}
2253
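	/* open one perf event per (selected metric, CPU) and stash its FD in
	 * the events map so the profiler BPF programs can read the counters
	 */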
2254	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2255		if (!metrics[m].selected)
2256			continue;
2257		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
2258			pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
2259					 -1/*pid*/, cpu, -1/*group_fd*/, 0);
2260			if (pmu_fd < 0 ||
2261			    bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
2262						&pmu_fd, BPF_ANY) ||
2263			    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
2264				p_err("failed to create event %s on cpu %d",
2265				      metrics[m].name, cpu);
2266				return -1;
2267			}
2268			profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
2269		}
2270	}
2271	return 0;
2272}
2273
2274static void profile_print_and_cleanup(void)
2275{
2276	profile_close_perf_events(profile_obj);
2277	profile_read_values(profile_obj);
2278	profile_print_readings();
2279	profiler_bpf__destroy(profile_obj);
2280
2281	close(profile_tgt_fd);
2282	free(profile_tgt_name);
2283}
2284
2285static void int_exit(int signo)
2286{
2287	profile_print_and_cleanup();
2288	exit(0);
2289}
2290
2291static int do_profile(int argc, char **argv)
2292{
2293	int num_metric, num_cpu, err = -1;
2294	struct bpf_program *prog;
2295	unsigned long duration;
2296	char *endptr;
2297
2298	/* need at least two args for the prog spec (e.g. "id ID") plus one metric */
2299	if (!REQ_ARGS(3))
2300		return -EINVAL;
2301
2302	/* parse target fd */
2303	profile_tgt_fd = prog_parse_fd(&argc, &argv);
2304	if (profile_tgt_fd < 0) {
2305		p_err("failed to parse fd");
2306		return -1;
2307	}
2308
2309	/* parse the optional profiling duration */
2310	if (argc > 2 && is_prefix(argv[0], "duration")) {
2311		NEXT_ARG();
2312		duration = strtoul(*argv, &endptr, 0);
2313		if (*endptr)
2314			usage();
2315		NEXT_ARG();
2316	} else {
2317		duration = UINT_MAX;
2318	}
2319
2320	num_metric = profile_parse_metrics(argc, argv);
2321	if (num_metric <= 0)
2322		goto out;
2323
2324	num_cpu = libbpf_num_possible_cpus();
2325	if (num_cpu <= 0) {
2326		p_err("failed to identify number of CPUs");
2327		goto out;
2328	}
2329
2330	profile_obj = profiler_bpf__open();
2331	if (!profile_obj) {
2332		p_err("failed to open and/or load BPF object");
2333		goto out;
2334	}
2335
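	/* these .rodata constants must be set before profiler_bpf__load();
	 * the BPF side uses them to bound its per-CPU/per-metric loops
	 */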
2336	profile_obj->rodata->num_cpu = num_cpu;
2337	profile_obj->rodata->num_metric = num_metric;
2338
2339	/* adjust map sizes */
2340	bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
2341	bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
2342	bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
2343	bpf_map__set_max_entries(profile_obj->maps.counts, 1);
2344
2345	/* change target name */
2346	profile_tgt_name = profile_target_name(profile_tgt_fd);
2347	if (!profile_tgt_name)
2348		goto out;
2349
2350	bpf_object__for_each_program(prog, profile_obj->obj) {
2351		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2352						     profile_tgt_name);
2353		if (err) {
2354			p_err("failed to set attach target");
2355			goto out;
2356		}
2357	}
2358
2359	set_max_rlimit();
2360	err = profiler_bpf__load(profile_obj);
2361	if (err) {
2362		p_err("failed to load profile_obj");
2363		goto out;
2364	}
2365
2366	err = profile_open_perf_events(profile_obj);
2367	if (err)
2368		goto out;
2369
2370	err = profiler_bpf__attach(profile_obj);
2371	if (err) {
2372		p_err("failed to attach profile_obj");
2373		goto out;
2374	}
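	/* profile until the duration expires or SIGINT arrives; both paths
	 * print the collected readings and clean everything up
	 */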
2375	signal(SIGINT, int_exit);
2376
2377	sleep(duration);
2378	profile_print_and_cleanup();
2379	return 0;
2380
2381out:
2382	profile_close_perf_events(profile_obj);
2383	if (profile_obj)
2384		profiler_bpf__destroy(profile_obj);
2385	close(profile_tgt_fd);
2386	free(profile_tgt_name);
2387	return err;
2388}
2389
2390#endif /* BPFTOOL_WITHOUT_SKELETONS */
2391
2392static int do_help(int argc, char **argv)
2393{
2394	if (json_output) {
2395		jsonw_null(json_wtr);
2396		return 0;
2397	}
2398
2399	fprintf(stderr,
2400		"Usage: %1$s %2$s { show | list } [PROG]\n"
2401		"       %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
2402		"       %1$s %2$s dump jited  PROG [{ file FILE | opcodes | linum }]\n"
2403		"       %1$s %2$s pin   PROG FILE\n"
2404		"       %1$s %2$s { load | loadall } OBJ  PATH \\\n"
2405		"                         [type TYPE] [dev NAME] \\\n"
2406		"                         [map { idx IDX | name NAME } MAP]\\\n"
2407		"                         [pinmaps MAP_DIR] \\\n"
2408		"                         [autoattach]\n"
2409		"       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2410		"       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2411		"       %1$s %2$s run PROG \\\n"
2412		"                         data_in FILE \\\n"
2413		"                         [data_out FILE [data_size_out L]] \\\n"
2414		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2415		"                         [repeat N]\n"
2416		"       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2417		"       %1$s %2$s tracelog\n"
2418		"       %1$s %2$s help\n"
2419		"\n"
2420		"       " HELP_SPEC_MAP "\n"
2421		"       " HELP_SPEC_PROGRAM "\n"
2422		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2423		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2424		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2425		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2426		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2427		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2428		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2429		"                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
2430		"                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
2431		"                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
2432		"                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
2433		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2434		"       ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
2435		"                        sk_skb_stream_parser | flow_dissector }\n"
2436		"       METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
2437		"       " HELP_SPEC_OPTIONS " |\n"
2438		"                    {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
2439		"                    {-L|--use-loader} }\n"
2440		"",
2441		bin_name, argv[-2]);
2442
2443	return 0;
2444}
2445
2446static const struct cmd cmds[] = {
2447	{ "show",	do_show },
2448	{ "list",	do_show },
2449	{ "help",	do_help },
2450	{ "dump",	do_dump },
2451	{ "pin",	do_pin },
2452	{ "load",	do_load },
2453	{ "loadall",	do_loadall },
2454	{ "attach",	do_attach },
2455	{ "detach",	do_detach },
2456	{ "tracelog",	do_tracelog },
2457	{ "run",	do_run },
2458	{ "profile",	do_profile },
2459	{ 0 }
2460};
2461
2462int do_prog(int argc, char **argv)
2463{
2464	return cmd_select(cmds, argc, argv, do_help);
2465}