v6.13.7
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3
   4#ifndef _GNU_SOURCE
   5#define _GNU_SOURCE
   6#endif
   7#include <errno.h>
   8#include <fcntl.h>
   9#include <signal.h>
  10#include <stdarg.h>
  11#include <stdio.h>
  12#include <stdlib.h>
  13#include <string.h>
  14#include <time.h>
  15#include <unistd.h>
  16#include <net/if.h>
  17#include <sys/ioctl.h>
  18#include <sys/types.h>
  19#include <sys/stat.h>
  20#include <sys/syscall.h>
  21#include <dirent.h>
  22
  23#include <linux/err.h>
  24#include <linux/perf_event.h>
  25#include <linux/sizes.h>
  26
  27#include <bpf/bpf.h>
  28#include <bpf/btf.h>
  29#include <bpf/hashmap.h>
  30#include <bpf/libbpf.h>
  31#include <bpf/libbpf_internal.h>
  32#include <bpf/skel_internal.h>
  33
  34#include "cfg.h"
  35#include "main.h"
  36#include "xlated_dumper.h"
  37
  38#define BPF_METADATA_PREFIX "bpf_metadata_"
  39#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
  40
  41enum dump_mode {
  42	DUMP_JITED,
  43	DUMP_XLATED,
  44};
  45
  46static const bool attach_types[] = {
  47	[BPF_SK_SKB_STREAM_PARSER] = true,
  48	[BPF_SK_SKB_STREAM_VERDICT] = true,
  49	[BPF_SK_SKB_VERDICT] = true,
  50	[BPF_SK_MSG_VERDICT] = true,
  51	[BPF_FLOW_DISSECTOR] = true,
  52	[__MAX_BPF_ATTACH_TYPE] = false,
  53};
  54
  55/* Textual representations traditionally used by the program and kept around
  56 * for the sake of backwards compatibility.
  57 */
  58static const char * const attach_type_strings[] = {
  59	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
  60	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
  61	[BPF_SK_SKB_VERDICT] = "skb_verdict",
  62	[BPF_SK_MSG_VERDICT] = "msg_verdict",
  63	[__MAX_BPF_ATTACH_TYPE] = NULL,
  64};
  65
  66static struct hashmap *prog_table;
  67
  68static enum bpf_attach_type parse_attach_type(const char *str)
  69{
  70	enum bpf_attach_type type;
  71
  72	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
  73		if (attach_types[type]) {
  74			const char *attach_type_str;
  75
  76			attach_type_str = libbpf_bpf_attach_type_str(type);
  77			if (!strcmp(str, attach_type_str))
  78				return type;
  79		}
  80
  81		if (attach_type_strings[type] &&
  82		    is_prefix(str, attach_type_strings[type]))
  83			return type;
  84	}
  85
  86	return __MAX_BPF_ATTACH_TYPE;
  87}
  88
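/* Size a single buffer large enough for every variable-length member of
 * bpf_prog_info requested for this dump mode (instructions, jited ksyms
 * and func lens, func/line info), grow *info_data if necessary, and point
 * the holder's fields at consecutive regions of it before the second
 * bpf_prog_get_info_by_fd() call fills them in.
 */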
  89static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
  90			  void **info_data, size_t *const info_data_sz)
  91{
  92	struct bpf_prog_info holder = {};
  93	size_t needed = 0;
  94	void *ptr;
  95
  96	if (mode == DUMP_JITED) {
  97		holder.jited_prog_len = info->jited_prog_len;
  98		needed += info->jited_prog_len;
  99	} else {
 100		holder.xlated_prog_len = info->xlated_prog_len;
 101		needed += info->xlated_prog_len;
 102	}
 103
 104	holder.nr_jited_ksyms = info->nr_jited_ksyms;
 105	needed += info->nr_jited_ksyms * sizeof(__u64);
 106
 107	holder.nr_jited_func_lens = info->nr_jited_func_lens;
 108	needed += info->nr_jited_func_lens * sizeof(__u32);
 109
 110	holder.nr_func_info = info->nr_func_info;
 111	holder.func_info_rec_size = info->func_info_rec_size;
 112	needed += info->nr_func_info * info->func_info_rec_size;
 113
 114	holder.nr_line_info = info->nr_line_info;
 115	holder.line_info_rec_size = info->line_info_rec_size;
 116	needed += info->nr_line_info * info->line_info_rec_size;
 117
 118	holder.nr_jited_line_info = info->nr_jited_line_info;
 119	holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
 120	needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
 121
 122	if (needed > *info_data_sz) {
 123		ptr = realloc(*info_data, needed);
 124		if (!ptr)
 125			return -1;
 126
 127		*info_data = ptr;
 128		*info_data_sz = needed;
 129	}
 130	ptr = *info_data;
 131
 132	if (mode == DUMP_JITED) {
 133		holder.jited_prog_insns = ptr_to_u64(ptr);
 134		ptr += holder.jited_prog_len;
 135	} else {
 136		holder.xlated_prog_insns = ptr_to_u64(ptr);
 137		ptr += holder.xlated_prog_len;
 138	}
 139
 140	holder.jited_ksyms = ptr_to_u64(ptr);
 141	ptr += holder.nr_jited_ksyms * sizeof(__u64);
 142
 143	holder.jited_func_lens = ptr_to_u64(ptr);
 144	ptr += holder.nr_jited_func_lens * sizeof(__u32);
 145
 146	holder.func_info = ptr_to_u64(ptr);
 147	ptr += holder.nr_func_info * holder.func_info_rec_size;
 148
 149	holder.line_info = ptr_to_u64(ptr);
 150	ptr += holder.nr_line_info * holder.line_info_rec_size;
 151
 152	holder.jited_line_info = ptr_to_u64(ptr);
 153	ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
 154
 155	*info = holder;
 156	return 0;
 157}
 158
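/* Convert a program load time given in nanoseconds since boot into a
 * wall-clock timestamp; fall back to printing raw seconds if the clocks
 * cannot be read or localtime_r() fails.
 */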
 159static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
 160{
 161	struct timespec real_time_ts, boot_time_ts;
 162	time_t wallclock_secs;
 163	struct tm load_tm;
 164
 165	buf[--size] = '\0';
 166
 167	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
 168	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
 169		perror("Can't read clocks");
 170		snprintf(buf, size, "%llu", nsecs / 1000000000);
 171		return;
 172	}
 173
 174	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
 175		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
 176		1000000000;
 177
 178
 179	if (!localtime_r(&wallclock_secs, &load_tm)) {
 180		snprintf(buf, size, "%llu", nsecs / 1000000000);
 181		return;
 182	}
 183
 184	if (json_output)
 185		strftime(buf, size, "%s", &load_tm);
 186	else
 187		strftime(buf, size, "%FT%T%z", &load_tm);
 188}
 189
 190static void show_prog_maps(int fd, __u32 num_maps)
 191{
 192	struct bpf_prog_info info = {};
 193	__u32 len = sizeof(info);
 194	__u32 map_ids[num_maps];
 195	unsigned int i;
 196	int err;
 197
 198	info.nr_map_ids = num_maps;
 199	info.map_ids = ptr_to_u64(map_ids);
 200
 201	err = bpf_prog_get_info_by_fd(fd, &info, &len);
 202	if (err || !info.nr_map_ids)
 203		return;
 204
 205	if (json_output) {
 206		jsonw_name(json_wtr, "map_ids");
 207		jsonw_start_array(json_wtr);
 208		for (i = 0; i < info.nr_map_ids; i++)
 209			jsonw_uint(json_wtr, map_ids[i]);
 210		jsonw_end_array(json_wtr);
 211	} else {
 212		printf("  map_ids ");
 213		for (i = 0; i < info.nr_map_ids; i++)
 214			printf("%u%s", map_ids[i],
 215			       i == info.nr_map_ids - 1 ? "" : ",");
 216	}
 217}
 218
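/* Walk the maps referenced by the program and return the contents of the
 * single-entry ".rodata" array map that carries BTF-described metadata,
 * or NULL if there is none. The caller frees the returned buffer.
 */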
 219static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
 220{
 221	struct bpf_prog_info prog_info;
 222	__u32 prog_info_len;
 223	__u32 map_info_len;
 224	void *value = NULL;
 225	__u32 *map_ids;
 226	int nr_maps;
 227	int key = 0;
 228	int map_fd;
 229	int ret;
 230	__u32 i;
 231
 232	memset(&prog_info, 0, sizeof(prog_info));
 233	prog_info_len = sizeof(prog_info);
 234	ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 235	if (ret)
 236		return NULL;
 237
 238	if (!prog_info.nr_map_ids)
 239		return NULL;
 240
 241	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
 242	if (!map_ids)
 243		return NULL;
 244
 245	nr_maps = prog_info.nr_map_ids;
 246	memset(&prog_info, 0, sizeof(prog_info));
 247	prog_info.nr_map_ids = nr_maps;
 248	prog_info.map_ids = ptr_to_u64(map_ids);
 249	prog_info_len = sizeof(prog_info);
 250
 251	ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
 252	if (ret)
 253		goto free_map_ids;
 254
 255	for (i = 0; i < prog_info.nr_map_ids; i++) {
 256		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
 257		if (map_fd < 0)
 258			goto free_map_ids;
 259
 260		memset(map_info, 0, sizeof(*map_info));
 261		map_info_len = sizeof(*map_info);
 262		ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len);
 263		if (ret < 0) {
 264			close(map_fd);
 265			goto free_map_ids;
 266		}
 267
 268		if (map_info->type != BPF_MAP_TYPE_ARRAY ||
 269		    map_info->key_size != sizeof(int) ||
 270		    map_info->max_entries != 1 ||
 271		    !map_info->btf_value_type_id ||
 272		    !strstr(map_info->name, ".rodata")) {
 273			close(map_fd);
 274			continue;
 275		}
 276
 277		value = malloc(map_info->value_size);
 278		if (!value) {
 279			close(map_fd);
 280			goto free_map_ids;
 281		}
 282
 283		if (bpf_map_lookup_elem(map_fd, &key, value)) {
 284			close(map_fd);
 285			free(value);
 286			value = NULL;
 287			goto free_map_ids;
 288		}
 289
 290		close(map_fd);
 291		break;
 292	}
 293
 294free_map_ids:
 295	free(map_ids);
 296	return value;
 297}
 298
 299static bool has_metadata_prefix(const char *s)
 300{
 301	return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
 302}
 303
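/* Print every "bpf_metadata_"-prefixed variable found in the program's
 * .rodata map, rendering the values through BTF in JSON or plain text.
 */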
 304static void show_prog_metadata(int fd, __u32 num_maps)
 305{
 306	const struct btf_type *t_datasec, *t_var;
 307	struct bpf_map_info map_info;
 308	struct btf_var_secinfo *vsi;
 309	bool printed_header = false;
 310	unsigned int i, vlen;
 311	void *value = NULL;
 312	const char *name;
 313	struct btf *btf;
 314	int err;
 315
 316	if (!num_maps)
 317		return;
 318
 319	memset(&map_info, 0, sizeof(map_info));
 320	value = find_metadata(fd, &map_info);
 321	if (!value)
 322		return;
 323
 324	btf = btf__load_from_kernel_by_id(map_info.btf_id);
 325	if (!btf)
 326		goto out_free;
 327
 328	t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
 329	if (!btf_is_datasec(t_datasec))
 330		goto out_free;
 331
 332	vlen = btf_vlen(t_datasec);
 333	vsi = btf_var_secinfos(t_datasec);
 334
 335	/* We don't proceed to check the kinds of the elements of the DATASEC.
 336	 * The verifier enforces them to be BTF_KIND_VAR.
 337	 */
 338
 339	if (json_output) {
 340		struct btf_dumper d = {
 341			.btf = btf,
 342			.jw = json_wtr,
 343			.is_plain_text = false,
 344		};
 345
 346		for (i = 0; i < vlen; i++, vsi++) {
 347			t_var = btf__type_by_id(btf, vsi->type);
 348			name = btf__name_by_offset(btf, t_var->name_off);
 349
 350			if (!has_metadata_prefix(name))
 351				continue;
 352
 353			if (!printed_header) {
 354				jsonw_name(json_wtr, "metadata");
 355				jsonw_start_object(json_wtr);
 356				printed_header = true;
 357			}
 358
 359			jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
 360			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 361			if (err) {
 362				p_err("btf dump failed: %d", err);
 363				break;
 364			}
 365		}
 366		if (printed_header)
 367			jsonw_end_object(json_wtr);
 368	} else {
 369		json_writer_t *btf_wtr;
 370		struct btf_dumper d = {
 371			.btf = btf,
 372			.is_plain_text = true,
 373		};
 374
 375		for (i = 0; i < vlen; i++, vsi++) {
 376			t_var = btf__type_by_id(btf, vsi->type);
 377			name = btf__name_by_offset(btf, t_var->name_off);
 378
 379			if (!has_metadata_prefix(name))
 380				continue;
 381
 382			if (!printed_header) {
 383				printf("\tmetadata:");
 384
 385				btf_wtr = jsonw_new(stdout);
 386				if (!btf_wtr) {
 387					p_err("jsonw alloc failed");
 388					goto out_free;
 389				}
 390				d.jw = btf_wtr,
 391
 392				printed_header = true;
 393			}
 394
 395			printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
 396
 397			jsonw_reset(btf_wtr);
 398			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
 399			if (err) {
 400				p_err("btf dump failed: %d", err);
 401				break;
 402			}
 403		}
 404		if (printed_header)
 405			jsonw_destroy(&btf_wtr);
 406	}
 407
 408out_free:
 409	btf__free(btf);
 410	free(value);
 411}
 412
 413static void print_prog_header_json(struct bpf_prog_info *info, int fd)
 414{
 415	const char *prog_type_str;
 416	char prog_name[MAX_PROG_FULL_NAME];
 417
 418	jsonw_uint_field(json_wtr, "id", info->id);
 419	prog_type_str = libbpf_bpf_prog_type_str(info->type);
 420
 421	if (prog_type_str)
 422		jsonw_string_field(json_wtr, "type", prog_type_str);
 423	else
 424		jsonw_uint_field(json_wtr, "type", info->type);
 425
 426	if (*info->name) {
 427		get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
 428		jsonw_string_field(json_wtr, "name", prog_name);
 429	}
 430
 431	jsonw_name(json_wtr, "tag");
 432	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
 433		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
 434		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 435
 436	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
 437	if (info->run_time_ns) {
 438		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
 439		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
 440	}
 441	if (info->recursion_misses)
 442		jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
 443}
 444
 445static void print_prog_json(struct bpf_prog_info *info, int fd, bool orphaned)
 446{
 447	char *memlock;
 448
 449	jsonw_start_object(json_wtr);
 450	print_prog_header_json(info, fd);
 451	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
 452
 453	if (info->load_time) {
 454		char buf[32];
 455
 456		print_boot_time(info->load_time, buf, sizeof(buf));
 457
 458		/* Piggy back on load_time, since 0 uid is a valid one */
 459		jsonw_name(json_wtr, "loaded_at");
 460		jsonw_printf(json_wtr, "%s", buf);
 461		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
 462	}
 463
 464	jsonw_bool_field(json_wtr, "orphaned", orphaned);
 465	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
 466
 467	if (info->jited_prog_len) {
 468		jsonw_bool_field(json_wtr, "jited", true);
 469		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
 470	} else {
 471		jsonw_bool_field(json_wtr, "jited", false);
 472	}
 473
 474	memlock = get_fdinfo(fd, "memlock");
 475	if (memlock)
 476		jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
 477	free(memlock);
 478
 479	if (info->nr_map_ids)
 480		show_prog_maps(fd, info->nr_map_ids);
 481
 482	if (info->btf_id)
 483		jsonw_int_field(json_wtr, "btf_id", info->btf_id);
 484
 485	if (!hashmap__empty(prog_table)) {
 486		struct hashmap_entry *entry;
 487
 488		jsonw_name(json_wtr, "pinned");
 489		jsonw_start_array(json_wtr);
 490		hashmap__for_each_key_entry(prog_table, entry, info->id)
 491			jsonw_string(json_wtr, entry->pvalue);
 492		jsonw_end_array(json_wtr);
 493	}
 494
 495	emit_obj_refs_json(refs_table, info->id, json_wtr);
 496
 497	show_prog_metadata(fd, info->nr_map_ids);
 498
 499	jsonw_end_object(json_wtr);
 500}
 501
 502static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
 503{
 504	const char *prog_type_str;
 505	char prog_name[MAX_PROG_FULL_NAME];
 506
 507	printf("%u: ", info->id);
 508	prog_type_str = libbpf_bpf_prog_type_str(info->type);
 509	if (prog_type_str)
 510		printf("%s  ", prog_type_str);
 511	else
 512		printf("type %u  ", info->type);
 513
 514	if (*info->name) {
 515		get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
 516		printf("name %s  ", prog_name);
 517	}
 518
 519	printf("tag ");
 520	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
 521	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
 522	printf("%s", info->gpl_compatible ? "  gpl" : "");
 523	if (info->run_time_ns)
 524		printf(" run_time_ns %lld run_cnt %lld",
 525		       info->run_time_ns, info->run_cnt);
 526	if (info->recursion_misses)
 527		printf(" recursion_misses %lld", info->recursion_misses);
 528	printf("\n");
 529}
 530
 531static void print_prog_plain(struct bpf_prog_info *info, int fd, bool orphaned)
 532{
 533	char *memlock;
 534
 535	print_prog_header_plain(info, fd);
 536
 537	if (info->load_time) {
 538		char buf[32];
 539
 540		print_boot_time(info->load_time, buf, sizeof(buf));
 541
 542		/* Piggy back on load_time, since 0 uid is a valid one */
 543		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
 544	}
 545
 546	printf("\txlated %uB", info->xlated_prog_len);
 547
 548	if (info->jited_prog_len)
 549		printf("  jited %uB", info->jited_prog_len);
 550	else
 551		printf("  not jited");
 552
 553	memlock = get_fdinfo(fd, "memlock");
 554	if (memlock)
 555		printf("  memlock %sB", memlock);
 556	free(memlock);
 557
 558	if (orphaned)
 559		printf("  orphaned");
 560
 561	if (info->nr_map_ids)
 562		show_prog_maps(fd, info->nr_map_ids);
 563
 564	if (!hashmap__empty(prog_table)) {
 565		struct hashmap_entry *entry;
 566
 567		hashmap__for_each_key_entry(prog_table, entry, info->id)
 568			printf("\n\tpinned %s", (char *)entry->pvalue);
 569	}
 570
 571	if (info->btf_id)
 572		printf("\n\tbtf_id %d", info->btf_id);
 573
 574	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
 575
 576	printf("\n");
 577
 578	show_prog_metadata(fd, info->nr_map_ids);
 579}
 580
 581static int show_prog(int fd)
 582{
 583	struct bpf_prog_info info = {};
 584	__u32 len = sizeof(info);
 585	int err;
 586
 587	err = bpf_prog_get_info_by_fd(fd, &info, &len);
 588	if (err && err != -ENODEV) {
 589		p_err("can't get prog info: %s", strerror(errno));
 590		return -1;
 591	}
 592
 593	if (json_output)
 594		print_prog_json(&info, fd, err == -ENODEV);
 595	else
 596		print_prog_plain(&info, fd, err == -ENODEV);
 597
 598	return 0;
 599}
 600
 601static int do_show_subset(int argc, char **argv)
 602{
 603	int *fds = NULL;
 604	int nb_fds, i;
 605	int err = -1;
 606
 607	fds = malloc(sizeof(int));
 608	if (!fds) {
 609		p_err("mem alloc failed");
 610		return -1;
 611	}
 612	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 613	if (nb_fds < 1)
 614		goto exit_free;
 615
 616	if (json_output && nb_fds > 1)
 617		jsonw_start_array(json_wtr);	/* root array */
 618	for (i = 0; i < nb_fds; i++) {
 619		err = show_prog(fds[i]);
 620		if (err) {
 621			for (; i < nb_fds; i++)
 622				close(fds[i]);
 623			break;
 624		}
 625		close(fds[i]);
 626	}
 627	if (json_output && nb_fds > 1)
 628		jsonw_end_array(json_wtr);	/* root array */
 629
 630exit_free:
 631	free(fds);
 632	return err;
 633}
 634
 635static int do_show(int argc, char **argv)
 636{
 637	__u32 id = 0;
 638	int err;
 639	int fd;
 640
 641	if (show_pinned) {
 642		prog_table = hashmap__new(hash_fn_for_key_as_id,
 643					  equal_fn_for_key_as_id, NULL);
 644		if (IS_ERR(prog_table)) {
 645			p_err("failed to create hashmap for pinned paths");
 646			return -1;
 647		}
 648		build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
 649	}
 650	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
 651
 652	if (argc == 2)
 653		return do_show_subset(argc, argv);
 654
 655	if (argc)
 656		return BAD_ARG();
 657
 658	if (json_output)
 659		jsonw_start_array(json_wtr);
 660	while (true) {
 661		err = bpf_prog_get_next_id(id, &id);
 662		if (err) {
 663			if (errno == ENOENT) {
 664				err = 0;
 665				break;
 666			}
 667			p_err("can't get next program: %s%s", strerror(errno),
 668			      errno == EINVAL ? " -- kernel too old?" : "");
 669			err = -1;
 670			break;
 671		}
 672
 673		fd = bpf_prog_get_fd_by_id(id);
 674		if (fd < 0) {
 675			if (errno == ENOENT)
 676				continue;
 677			p_err("can't get prog by id (%u): %s",
 678			      id, strerror(errno));
 679			err = -1;
 680			break;
 681		}
 682
 683		err = show_prog(fd);
 684		close(fd);
 685		if (err)
 686			break;
 687	}
 688
 689	if (json_output)
 690		jsonw_end_array(json_wtr);
 691
 692	delete_obj_refs_table(refs_table);
 693
 694	if (show_pinned)
 695		delete_pinned_obj_table(prog_table);
 696
 697	return err;
 698}
 699
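/* Dump a single program: write the raw instructions to a file if a path
 * was given, disassemble JITed code (per sub-program when function
 * lengths are available), or print xlated instructions as JSON, CFG or
 * plain text.
 */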
 700static int
 701prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 702	  char *filepath, bool opcodes, bool visual, bool linum)
 703{
 704	struct bpf_prog_linfo *prog_linfo = NULL;
 705	const char *disasm_opt = NULL;
 706	struct dump_data dd = {};
 707	void *func_info = NULL;
 708	struct btf *btf = NULL;
 709	char func_sig[1024];
 710	unsigned char *buf;
 711	__u32 member_len;
 712	int fd, err = -1;
 713	ssize_t n;
 714
 715	if (mode == DUMP_JITED) {
 716		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
 717			p_info("no instructions returned");
 718			return -1;
 719		}
 720		buf = u64_to_ptr(info->jited_prog_insns);
 721		member_len = info->jited_prog_len;
 722	} else {	/* DUMP_XLATED */
 723		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 724			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 725			return -1;
 726		}
 727		buf = u64_to_ptr(info->xlated_prog_insns);
 728		member_len = info->xlated_prog_len;
 729	}
 730
 731	if (info->btf_id) {
 732		btf = btf__load_from_kernel_by_id(info->btf_id);
 733		if (!btf) {
 734			p_err("failed to get btf");
 735			return -1;
 736		}
 737	}
 738
 739	func_info = u64_to_ptr(info->func_info);
 740
 741	if (info->nr_line_info) {
 742		prog_linfo = bpf_prog_linfo__new(info);
 743		if (!prog_linfo)
 744			p_info("error in processing bpf_line_info.  continue without it.");
 745	}
 746
 747	if (filepath) {
 748		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 749		if (fd < 0) {
 750			p_err("can't open file %s: %s", filepath,
 751			      strerror(errno));
 752			goto exit_free;
 753		}
 754
 755		n = write(fd, buf, member_len);
 756		close(fd);
 757		if (n != (ssize_t)member_len) {
 758			p_err("error writing output file: %s",
 759			      n < 0 ? strerror(errno) : "short write");
 760			goto exit_free;
 761		}
 762
 763		if (json_output)
 764			jsonw_null(json_wtr);
 765	} else if (mode == DUMP_JITED) {
 766		const char *name = NULL;
 767
 768		if (info->ifindex) {
 769			name = ifindex_to_arch(info->ifindex, info->netns_dev,
 770					       info->netns_ino, &disasm_opt);
 771			if (!name)
 772				goto exit_free;
 773		}
 774
 775		if (info->nr_jited_func_lens && info->jited_func_lens) {
 776			struct kernel_sym *sym = NULL;
 777			struct bpf_func_info *record;
 778			char sym_name[SYM_MAX_NAME];
 779			unsigned char *img = buf;
 780			__u64 *ksyms = NULL;
 781			__u32 *lens;
 782			__u32 i;
 783			if (info->nr_jited_ksyms) {
 784				kernel_syms_load(&dd);
 785				ksyms = u64_to_ptr(info->jited_ksyms);
 786			}
 787
 788			if (json_output)
 789				jsonw_start_array(json_wtr);
 790
 791			lens = u64_to_ptr(info->jited_func_lens);
 792			for (i = 0; i < info->nr_jited_func_lens; i++) {
 793				if (ksyms) {
 794					sym = kernel_syms_search(&dd, ksyms[i]);
 795					if (sym)
 796						sprintf(sym_name, "%s", sym->name);
 797					else
 798						sprintf(sym_name, "0x%016llx", ksyms[i]);
 799				} else {
 800					strcpy(sym_name, "unknown");
 801				}
 802
 803				if (func_info) {
 804					record = func_info + i * info->func_info_rec_size;
 805					btf_dumper_type_only(btf, record->type_id,
 806							     func_sig,
 807							     sizeof(func_sig));
 808				}
 809
 810				if (json_output) {
 811					jsonw_start_object(json_wtr);
 812					if (func_info && func_sig[0] != '\0') {
 813						jsonw_name(json_wtr, "proto");
 814						jsonw_string(json_wtr, func_sig);
 815					}
 816					jsonw_name(json_wtr, "name");
 817					jsonw_string(json_wtr, sym_name);
 818					jsonw_name(json_wtr, "insns");
 819				} else {
 820					if (func_info && func_sig[0] != '\0')
 821						printf("%s:\n", func_sig);
 822					printf("%s:\n", sym_name);
 823				}
 824
 825				if (ksyms) {
 826					if (disasm_print_insn(img, lens[i], opcodes,
 827							      name, disasm_opt, btf,
 828							      prog_linfo, ksyms[i], i,
 829							      linum))
 830						goto exit_free;
 831				} else {
 832					if (disasm_print_insn(img, lens[i], opcodes,
 833							      name, disasm_opt, btf,
 834							      NULL, 0, 0, false))
 835						goto exit_free;
 836				}
 837
 838				img += lens[i];
 839
 840				if (json_output)
 841					jsonw_end_object(json_wtr);
 842				else
 843					printf("\n");
 844			}
 845
 846			if (json_output)
 847				jsonw_end_array(json_wtr);
 848		} else {
 849			if (disasm_print_insn(buf, member_len, opcodes, name,
 850					      disasm_opt, btf, NULL, 0, 0,
 851					      false))
 852				goto exit_free;
 853		}
 854	} else {
 855		kernel_syms_load(&dd);
 856		dd.nr_jited_ksyms = info->nr_jited_ksyms;
 857		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
 858		dd.btf = btf;
 859		dd.func_info = func_info;
 860		dd.finfo_rec_size = info->func_info_rec_size;
 861		dd.prog_linfo = prog_linfo;
 862
 863		if (json_output)
 864			dump_xlated_json(&dd, buf, member_len, opcodes, linum);
 865		else if (visual)
 866			dump_xlated_cfg(&dd, buf, member_len, opcodes, linum);
 867		else
 868			dump_xlated_plain(&dd, buf, member_len, opcodes, linum);
 869		kernel_syms_destroy(&dd);
 870	}
 871
 872	err = 0;
 873
 874exit_free:
 875	btf__free(btf);
 876	bpf_prog_linfo__free(prog_linfo);
 877	return err;
 878}
 879
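/* Handle "prog dump jited|xlated": parse the dump options, query program
 * info twice per matched program (first for sizes, then for data) and
 * hand each result to prog_dump().
 */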
 880static int do_dump(int argc, char **argv)
 881{
 882	struct bpf_prog_info info;
 883	__u32 info_len = sizeof(info);
 884	size_t info_data_sz = 0;
 885	void *info_data = NULL;
 886	char *filepath = NULL;
 887	bool opcodes = false;
 888	bool visual = false;
 889	enum dump_mode mode;
 890	bool linum = false;
 891	int nb_fds, i = 0;
 892	int *fds = NULL;
 893	int err = -1;
 894
 895	if (is_prefix(*argv, "jited")) {
 896		if (disasm_init())
 897			return -1;
 898		mode = DUMP_JITED;
 899	} else if (is_prefix(*argv, "xlated")) {
 900		mode = DUMP_XLATED;
 901	} else {
 902		p_err("expected 'xlated' or 'jited', got: %s", *argv);
 903		return -1;
 904	}
 905	NEXT_ARG();
 906
 907	if (argc < 2)
 908		usage();
 909
 910	fds = malloc(sizeof(int));
 911	if (!fds) {
 912		p_err("mem alloc failed");
 913		return -1;
 914	}
 915	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 916	if (nb_fds < 1)
 917		goto exit_free;
 918
 919	while (argc) {
 920		if (is_prefix(*argv, "file")) {
 921			NEXT_ARG();
 922			if (!argc) {
 923				p_err("expected file path");
 924				goto exit_close;
 925			}
 926			if (nb_fds > 1) {
 927				p_err("several programs matched");
 928				goto exit_close;
 929			}
 930
 931			filepath = *argv;
 932			NEXT_ARG();
 933		} else if (is_prefix(*argv, "opcodes")) {
 934			opcodes = true;
 935			NEXT_ARG();
 936		} else if (is_prefix(*argv, "visual")) {
 937			if (nb_fds > 1) {
 938				p_err("several programs matched");
 939				goto exit_close;
 940			}
 941
 942			visual = true;
 943			NEXT_ARG();
 944		} else if (is_prefix(*argv, "linum")) {
 945			linum = true;
 946			NEXT_ARG();
 947		} else {
 948			usage();
 949			goto exit_close;
 950		}
 951	}
 952
 953	if (filepath && (opcodes || visual || linum)) {
 954		p_err("'file' is not compatible with 'opcodes', 'visual', or 'linum'");
 955		goto exit_close;
 956	}
 957	if (json_output && visual) {
 958		p_err("'visual' is not compatible with JSON output");
 959		goto exit_close;
 960	}
 961
 962	if (json_output && nb_fds > 1)
 963		jsonw_start_array(json_wtr);	/* root array */
 964	for (i = 0; i < nb_fds; i++) {
 965		memset(&info, 0, sizeof(info));
 966
 967		err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
 968		if (err) {
 969			p_err("can't get prog info: %s", strerror(errno));
 970			break;
 971		}
 972
 973		err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
 974		if (err) {
 975			p_err("can't grow prog info_data");
 976			break;
 977		}
 978
 979		err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
 980		if (err) {
 981			p_err("can't get prog info: %s", strerror(errno));
 982			break;
 983		}
 984
 985		if (json_output && nb_fds > 1) {
 986			jsonw_start_object(json_wtr);	/* prog object */
 987			print_prog_header_json(&info, fds[i]);
 988			jsonw_name(json_wtr, "insns");
 989		} else if (nb_fds > 1) {
 990			print_prog_header_plain(&info, fds[i]);
 991		}
 992
 993		err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
 994
 995		if (json_output && nb_fds > 1)
 996			jsonw_end_object(json_wtr);	/* prog object */
 997		else if (i != nb_fds - 1 && nb_fds > 1)
 998			printf("\n");
 999
1000		if (err)
1001			break;
1002		close(fds[i]);
1003	}
1004	if (json_output && nb_fds > 1)
1005		jsonw_end_array(json_wtr);	/* root array */
1006
1007exit_close:
1008	for (; i < nb_fds; i++)
1009		close(fds[i]);
1010exit_free:
1011	free(info_data);
1012	free(fds);
1013	return err;
1014}
1015
1016static int do_pin(int argc, char **argv)
1017{
1018	int err;
1019
1020	err = do_pin_any(argc, argv, prog_parse_fd);
1021	if (!err && json_output)
1022		jsonw_null(json_wtr);
1023	return err;
1024}
1025
1026struct map_replace {
1027	int idx;
1028	int fd;
1029	char *name;
1030};
1031
1032static int map_replace_compar(const void *p1, const void *p2)
1033{
1034	const struct map_replace *a = p1, *b = p2;
1035
1036	return a->idx - b->idx;
1037}
1038
1039static int parse_attach_detach_args(int argc, char **argv, int *progfd,
1040				    enum bpf_attach_type *attach_type,
1041				    int *mapfd)
1042{
1043	if (!REQ_ARGS(3))
1044		return -EINVAL;
1045
1046	*progfd = prog_parse_fd(&argc, &argv);
1047	if (*progfd < 0)
1048		return *progfd;
1049
1050	*attach_type = parse_attach_type(*argv);
1051	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
1052		p_err("invalid attach/detach type");
1053		return -EINVAL;
1054	}
1055
1056	if (*attach_type == BPF_FLOW_DISSECTOR) {
1057		*mapfd = 0;
1058		return 0;
1059	}
1060
1061	NEXT_ARG();
1062	if (!REQ_ARGS(2))
1063		return -EINVAL;
1064
1065	*mapfd = map_parse_fd(&argc, &argv);
1066	if (*mapfd < 0)
1067		return *mapfd;
1068
1069	return 0;
1070}
1071
1072static int do_attach(int argc, char **argv)
1073{
1074	enum bpf_attach_type attach_type;
1075	int err, progfd;
1076	int mapfd;
1077
1078	err = parse_attach_detach_args(argc, argv,
1079				       &progfd, &attach_type, &mapfd);
1080	if (err)
1081		return err;
1082
1083	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
1084	if (err) {
1085		p_err("failed prog attach to map");
1086		return -EINVAL;
1087	}
1088
1089	if (json_output)
1090		jsonw_null(json_wtr);
1091	return 0;
1092}
1093
1094static int do_detach(int argc, char **argv)
1095{
1096	enum bpf_attach_type attach_type;
1097	int err, progfd;
1098	int mapfd;
1099
1100	err = parse_attach_detach_args(argc, argv,
1101				       &progfd, &attach_type, &mapfd);
1102	if (err)
1103		return err;
1104
1105	err = bpf_prog_detach2(progfd, mapfd, attach_type);
1106	if (err) {
1107		p_err("failed prog detach from map");
1108		return -EINVAL;
1109	}
1110
1111	if (json_output)
1112		jsonw_null(json_wtr);
1113	return 0;
1114}
1115
1116static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1117{
1118	if (file_data_in && file_ctx_in &&
1119	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1120		p_err("cannot use standard input for both data_in and ctx_in");
1121		return -1;
1122	}
1123
1124	return 0;
1125}
1126
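/* Read data_in/ctx_in for "prog run" from a file or standard input into a
 * dynamically grown buffer, doubling its size as needed up to UINT32_MAX
 * bytes.
 */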
1127static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1128{
1129	size_t block_size = 256;
1130	size_t buf_size = block_size;
1131	size_t nb_read = 0;
1132	void *tmp;
1133	FILE *f;
1134
1135	if (!fname) {
1136		*data_ptr = NULL;
1137		*size = 0;
1138		return 0;
1139	}
1140
1141	if (!strcmp(fname, "-"))
1142		f = stdin;
1143	else
1144		f = fopen(fname, "r");
1145	if (!f) {
1146		p_err("failed to open %s: %s", fname, strerror(errno));
1147		return -1;
1148	}
1149
1150	*data_ptr = malloc(block_size);
1151	if (!*data_ptr) {
1152		p_err("failed to allocate memory for data_in/ctx_in: %s",
1153		      strerror(errno));
1154		goto err_fclose;
1155	}
1156
1157	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1158		if (feof(f))
1159			break;
1160		if (ferror(f)) {
1161			p_err("failed to read data_in/ctx_in from %s: %s",
1162			      fname, strerror(errno));
1163			goto err_free;
1164		}
1165		if (nb_read > buf_size - block_size) {
1166			if (buf_size == UINT32_MAX) {
1167				p_err("data_in/ctx_in is too long (max: %d)",
1168				      UINT32_MAX);
1169				goto err_free;
1170			}
1171			/* No space for fread()-ing next chunk; realloc() */
1172			buf_size *= 2;
1173			tmp = realloc(*data_ptr, buf_size);
1174			if (!tmp) {
1175				p_err("failed to reallocate data_in/ctx_in: %s",
1176				      strerror(errno));
1177				goto err_free;
1178			}
1179			*data_ptr = tmp;
1180		}
1181	}
1182	if (f != stdin)
1183		fclose(f);
1184
1185	*size = nb_read;
1186	return 0;
1187
1188err_free:
1189	free(*data_ptr);
1190	*data_ptr = NULL;
1191err_fclose:
1192	if (f != stdin)
1193		fclose(f);
1194	return -1;
1195}
1196
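/* Print a buffer as a hex dump: offset, hexadecimal bytes in 16-byte rows,
 * then the printable ASCII representation (non-printable bytes as '.').
 */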
1197static void hex_print(void *data, unsigned int size, FILE *f)
1198{
1199	size_t i, j;
1200	char c;
1201
1202	for (i = 0; i < size; i += 16) {
1203		/* Row offset */
1204		fprintf(f, "%07zx\t", i);
1205
1206		/* Hexadecimal values */
1207		for (j = i; j < i + 16 && j < size; j++)
1208			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1209				j % 2 ? " " : "");
1210		for (; j < i + 16; j++)
1211			fprintf(f, "  %s", j % 2 ? " " : "");
1212
1213		/* ASCII values (if relevant), '.' otherwise */
1214		fprintf(f, "| ");
1215		for (j = i; j < i + 16 && j < size; j++) {
1216			c = *(char *)(data + j);
1217			if (c < ' ' || c > '~')
1218				c = '.';
1219			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1220		}
1221
1222		fprintf(f, "\n");
1223	}
1224}
1225
1226static int
1227print_run_output(void *data, unsigned int size, const char *fname,
1228		 const char *json_key)
1229{
1230	size_t nb_written;
1231	FILE *f;
1232
1233	if (!fname)
1234		return 0;
1235
1236	if (!strcmp(fname, "-")) {
1237		f = stdout;
1238		if (json_output) {
1239			jsonw_name(json_wtr, json_key);
1240			print_data_json(data, size);
1241		} else {
1242			hex_print(data, size, f);
1243		}
1244		return 0;
1245	}
1246
1247	f = fopen(fname, "w");
1248	if (!f) {
1249		p_err("failed to open %s: %s", fname, strerror(errno));
1250		return -1;
1251	}
1252
1253	nb_written = fwrite(data, 1, size, f);
1254	fclose(f);
1255	if (nb_written != size) {
1256		p_err("failed to write output data/ctx: %s", strerror(errno));
1257		return -1;
1258	}
1259
1260	return 0;
1261}
1262
1263static int alloc_run_data(void **data_ptr, unsigned int size_out)
1264{
1265	*data_ptr = calloc(size_out, 1);
1266	if (!*data_ptr) {
1267		p_err("failed to allocate memory for output data/ctx: %s",
1268		      strerror(errno));
1269		return -1;
1270	}
1271
1272	return 0;
1273}
1274
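/* Handle "prog run": parse input/output data and context options, run the
 * program through bpf_prog_test_run_opts() and print the resulting data,
 * return value and duration.
 */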
1275static int do_run(int argc, char **argv)
1276{
1277	char *data_fname_in = NULL, *data_fname_out = NULL;
1278	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
1279	const unsigned int default_size = SZ_32K;
1280	void *data_in = NULL, *data_out = NULL;
1281	void *ctx_in = NULL, *ctx_out = NULL;
1282	unsigned int repeat = 1;
1283	int fd, err;
1284	LIBBPF_OPTS(bpf_test_run_opts, test_attr);
1285
1286	if (!REQ_ARGS(4))
1287		return -1;
1288
1289	fd = prog_parse_fd(&argc, &argv);
1290	if (fd < 0)
1291		return -1;
1292
1293	while (argc) {
1294		if (detect_common_prefix(*argv, "data_in", "data_out",
1295					 "data_size_out", NULL))
1296			return -1;
1297		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1298					 "ctx_size_out", NULL))
1299			return -1;
1300
1301		if (is_prefix(*argv, "data_in")) {
1302			NEXT_ARG();
1303			if (!REQ_ARGS(1))
1304				return -1;
1305
1306			data_fname_in = GET_ARG();
1307			if (check_single_stdin(data_fname_in, ctx_fname_in))
1308				return -1;
1309		} else if (is_prefix(*argv, "data_out")) {
1310			NEXT_ARG();
1311			if (!REQ_ARGS(1))
1312				return -1;
1313
1314			data_fname_out = GET_ARG();
1315		} else if (is_prefix(*argv, "data_size_out")) {
1316			char *endptr;
1317
1318			NEXT_ARG();
1319			if (!REQ_ARGS(1))
1320				return -1;
1321
1322			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1323			if (*endptr) {
1324				p_err("can't parse %s as output data size",
1325				      *argv);
1326				return -1;
1327			}
1328			NEXT_ARG();
1329		} else if (is_prefix(*argv, "ctx_in")) {
1330			NEXT_ARG();
1331			if (!REQ_ARGS(1))
1332				return -1;
1333
1334			ctx_fname_in = GET_ARG();
1335			if (check_single_stdin(data_fname_in, ctx_fname_in))
1336				return -1;
1337		} else if (is_prefix(*argv, "ctx_out")) {
1338			NEXT_ARG();
1339			if (!REQ_ARGS(1))
1340				return -1;
1341
1342			ctx_fname_out = GET_ARG();
1343		} else if (is_prefix(*argv, "ctx_size_out")) {
1344			char *endptr;
1345
1346			NEXT_ARG();
1347			if (!REQ_ARGS(1))
1348				return -1;
1349
1350			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1351			if (*endptr) {
1352				p_err("can't parse %s as output context size",
1353				      *argv);
1354				return -1;
1355			}
1356			NEXT_ARG();
1357		} else if (is_prefix(*argv, "repeat")) {
1358			char *endptr;
1359
1360			NEXT_ARG();
1361			if (!REQ_ARGS(1))
1362				return -1;
1363
1364			repeat = strtoul(*argv, &endptr, 0);
1365			if (*endptr) {
1366				p_err("can't parse %s as repeat number",
1367				      *argv);
1368				return -1;
1369			}
1370			NEXT_ARG();
1371		} else {
1372			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1373			      *argv);
1374			return -1;
1375		}
1376	}
1377
1378	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1379	if (err)
1380		return -1;
1381
1382	if (data_in) {
1383		if (!test_attr.data_size_out)
1384			test_attr.data_size_out = default_size;
1385		err = alloc_run_data(&data_out, test_attr.data_size_out);
1386		if (err)
1387			goto free_data_in;
1388	}
1389
1390	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1391	if (err)
1392		goto free_data_out;
1393
1394	if (ctx_in) {
1395		if (!test_attr.ctx_size_out)
1396			test_attr.ctx_size_out = default_size;
1397		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1398		if (err)
1399			goto free_ctx_in;
1400	}
1401
1402	test_attr.repeat	= repeat;
1403	test_attr.data_in	= data_in;
1404	test_attr.data_out	= data_out;
1405	test_attr.ctx_in	= ctx_in;
1406	test_attr.ctx_out	= ctx_out;
1407
1408	err = bpf_prog_test_run_opts(fd, &test_attr);
1409	if (err) {
1410		p_err("failed to run program: %s", strerror(errno));
1411		goto free_ctx_out;
1412	}
1413
1414	err = 0;
1415
1416	if (json_output)
1417		jsonw_start_object(json_wtr);	/* root */
1418
1419	/* Do not exit on errors occurring when printing output data/context,
1420	 * we still want to print return value and duration for program run.
1421	 */
1422	if (test_attr.data_size_out)
1423		err += print_run_output(test_attr.data_out,
1424					test_attr.data_size_out,
1425					data_fname_out, "data_out");
1426	if (test_attr.ctx_size_out)
1427		err += print_run_output(test_attr.ctx_out,
1428					test_attr.ctx_size_out,
1429					ctx_fname_out, "ctx_out");
1430
1431	if (json_output) {
1432		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1433		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1434		jsonw_end_object(json_wtr);	/* root */
1435	} else {
1436		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1437			test_attr.retval,
1438			repeat > 1 ? " (average)" : "", test_attr.duration);
1439	}
1440
1441free_ctx_out:
1442	free(ctx_out);
1443free_ctx_in:
1444	free(ctx_in);
1445free_data_out:
1446	free(data_out);
1447free_data_in:
1448	free(data_in);
1449
1450	return err;
1451}
1452
1453static int
1454get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1455		      enum bpf_attach_type *expected_attach_type)
1456{
1457	libbpf_print_fn_t print_backup;
1458	int ret;
1459
1460	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1461	if (!ret)
1462		return ret;
1463
1464	/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1465	print_backup = libbpf_set_print(print_all_levels);
1466	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1467	libbpf_set_print(print_backup);
1468
1469	return ret;
1470}
1471
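/* Try to auto-attach the program and pin the resulting link at the given
 * path; if the program type has no auto-attach support, fall back to
 * pinning the program itself.
 */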
1472static int
1473auto_attach_program(struct bpf_program *prog, const char *path)
1474{
1475	struct bpf_link *link;
1476	int err;
1477
1478	link = bpf_program__attach(prog);
1479	if (!link) {
1480		p_info("Program %s does not support autoattach, falling back to pinning",
1481		       bpf_program__name(prog));
1482		return bpf_obj_pin(bpf_program__fd(prog), path);
1483	}
1484
1485	err = bpf_link__pin(link, path);
1486	bpf_link__destroy(link);
1487	return err;
1488}
1489
1490static int
1491auto_attach_programs(struct bpf_object *obj, const char *path)
1492{
1493	struct bpf_program *prog;
1494	char buf[PATH_MAX];
1495	int err;
1496
1497	bpf_object__for_each_program(prog, obj) {
1498		err = pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog));
1499		if (err)
1500			goto err_unpin_programs;
1501
1502		err = auto_attach_program(prog, buf);
1503		if (err)
1504			goto err_unpin_programs;
1505	}
1506
1507	return 0;
1508
1509err_unpin_programs:
1510	while ((prog = bpf_object__prev_program(obj, prog))) {
1511		if (pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog)))
1512			continue;
1513
1514		bpf_program__unpin(prog, buf);
1515	}
1516
1517	return err;
1518}
1519
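/* Back-end for "prog load" and "prog loadall": parse the type/map/dev
 * options, open and load the object file, optionally reuse existing map
 * FDs, then pin the first program or all programs (and, with "pinmaps",
 * the maps) on a bpffs mount.
 */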
1520static int load_with_options(int argc, char **argv, bool first_prog_only)
1521{
1522	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1523	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1524		.relaxed_maps = relaxed_maps,
1525	);
1526	enum bpf_attach_type expected_attach_type;
1527	struct map_replace *map_replace = NULL;
1528	struct bpf_program *prog = NULL, *pos;
1529	unsigned int old_map_fds = 0;
1530	const char *pinmaps = NULL;
1531	__u32 xdpmeta_ifindex = 0;
1532	__u32 offload_ifindex = 0;
1533	bool auto_attach = false;
1534	struct bpf_object *obj;
1535	struct bpf_map *map;
1536	const char *pinfile;
1537	unsigned int i, j;
1538	const char *file;
1539	int idx, err;
1540
1541
1542	if (!REQ_ARGS(2))
1543		return -1;
1544	file = GET_ARG();
1545	pinfile = GET_ARG();
1546
1547	while (argc) {
1548		if (is_prefix(*argv, "type")) {
1549			NEXT_ARG();
1550
1551			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1552				p_err("program type already specified");
1553				goto err_free_reuse_maps;
1554			}
1555			if (!REQ_ARGS(1))
1556				goto err_free_reuse_maps;
1557
1558			err = libbpf_prog_type_by_name(*argv, &common_prog_type,
1559						       &expected_attach_type);
1560			if (err < 0) {
1561				/* Put a '/' at the end of type to appease libbpf */
1562				char *type = malloc(strlen(*argv) + 2);
1563
1564				if (!type) {
1565					p_err("mem alloc failed");
1566					goto err_free_reuse_maps;
1567				}
1568				*type = 0;
1569				strcat(type, *argv);
1570				strcat(type, "/");
1571
1572				err = get_prog_type_by_name(type, &common_prog_type,
1573							    &expected_attach_type);
1574				free(type);
1575				if (err < 0)
1576					goto err_free_reuse_maps;
1577			}
1578
1579			NEXT_ARG();
1580		} else if (is_prefix(*argv, "map")) {
1581			void *new_map_replace;
1582			char *endptr, *name;
1583			int fd;
1584
1585			NEXT_ARG();
1586
1587			if (!REQ_ARGS(4))
1588				goto err_free_reuse_maps;
1589
1590			if (is_prefix(*argv, "idx")) {
1591				NEXT_ARG();
1592
1593				idx = strtoul(*argv, &endptr, 0);
1594				if (*endptr) {
1595					p_err("can't parse %s as IDX", *argv);
1596					goto err_free_reuse_maps;
1597				}
1598				name = NULL;
1599			} else if (is_prefix(*argv, "name")) {
1600				NEXT_ARG();
1601
1602				name = *argv;
1603				idx = -1;
1604			} else {
1605				p_err("expected 'idx' or 'name', got: '%s'?",
1606				      *argv);
1607				goto err_free_reuse_maps;
1608			}
1609			NEXT_ARG();
1610
1611			fd = map_parse_fd(&argc, &argv);
1612			if (fd < 0)
1613				goto err_free_reuse_maps;
1614
1615			new_map_replace = libbpf_reallocarray(map_replace,
1616							      old_map_fds + 1,
1617							      sizeof(*map_replace));
1618			if (!new_map_replace) {
1619				p_err("mem alloc failed");
1620				goto err_free_reuse_maps;
1621			}
1622			map_replace = new_map_replace;
1623
1624			map_replace[old_map_fds].idx = idx;
1625			map_replace[old_map_fds].name = name;
1626			map_replace[old_map_fds].fd = fd;
1627			old_map_fds++;
1628		} else if (is_prefix(*argv, "dev")) {
1629			p_info("Warning: 'bpftool prog load [...] dev <ifname>' syntax is deprecated.\n"
1630			       "Going further, please use 'offload_dev <ifname>' to offload program to device.\n"
1631			       "For applications using XDP hints only, use 'xdpmeta_dev <ifname>'.");
1632			goto offload_dev;
1633		} else if (is_prefix(*argv, "offload_dev")) {
1634offload_dev:
1635			NEXT_ARG();
1636
1637			if (offload_ifindex) {
1638				p_err("offload_dev already specified");
1639				goto err_free_reuse_maps;
1640			} else if (xdpmeta_ifindex) {
1641				p_err("xdpmeta_dev and offload_dev are mutually exclusive");
1642				goto err_free_reuse_maps;
1643			}
1644			if (!REQ_ARGS(1))
1645				goto err_free_reuse_maps;
1646
1647			offload_ifindex = if_nametoindex(*argv);
1648			if (!offload_ifindex) {
1649				p_err("unrecognized netdevice '%s': %s",
1650				      *argv, strerror(errno));
1651				goto err_free_reuse_maps;
1652			}
1653			NEXT_ARG();
1654		} else if (is_prefix(*argv, "xdpmeta_dev")) {
1655			NEXT_ARG();
1656
1657			if (xdpmeta_ifindex) {
1658				p_err("xdpmeta_dev already specified");
1659				goto err_free_reuse_maps;
1660			} else if (offload_ifindex) {
1661				p_err("xdpmeta_dev and offload_dev are mutually exclusive");
1662				goto err_free_reuse_maps;
1663			}
1664			if (!REQ_ARGS(1))
1665				goto err_free_reuse_maps;
1666
1667			xdpmeta_ifindex = if_nametoindex(*argv);
1668			if (!xdpmeta_ifindex) {
1669				p_err("unrecognized netdevice '%s': %s",
1670				      *argv, strerror(errno));
1671				goto err_free_reuse_maps;
1672			}
1673			NEXT_ARG();
1674		} else if (is_prefix(*argv, "pinmaps")) {
1675			NEXT_ARG();
1676
1677			if (!REQ_ARGS(1))
1678				goto err_free_reuse_maps;
1679
1680			pinmaps = GET_ARG();
1681		} else if (is_prefix(*argv, "autoattach")) {
1682			auto_attach = true;
1683			NEXT_ARG();
1684		} else {
1685			p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
1686			      *argv);
1687			goto err_free_reuse_maps;
1688		}
1689	}
1690
1691	set_max_rlimit();
1692
1693	if (verifier_logs)
1694		/* log_level1 + log_level2 + stats, but not stable UAPI */
1695		open_opts.kernel_log_level = 1 + 2 + 4;
1696
1697	obj = bpf_object__open_file(file, &open_opts);
1698	if (!obj) {
1699		p_err("failed to open object file");
1700		goto err_free_reuse_maps;
1701	}
1702
1703	bpf_object__for_each_program(pos, obj) {
1704		enum bpf_prog_type prog_type = common_prog_type;
1705
1706		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1707			const char *sec_name = bpf_program__section_name(pos);
1708
1709			err = get_prog_type_by_name(sec_name, &prog_type,
1710						    &expected_attach_type);
1711			if (err < 0)
1712				goto err_close_obj;
1713		}
1714
1715		if (prog_type == BPF_PROG_TYPE_XDP && xdpmeta_ifindex) {
1716			bpf_program__set_flags(pos, BPF_F_XDP_DEV_BOUND_ONLY);
1717			bpf_program__set_ifindex(pos, xdpmeta_ifindex);
1718		} else {
1719			bpf_program__set_ifindex(pos, offload_ifindex);
1720		}
1721		if (bpf_program__type(pos) != prog_type)
1722			bpf_program__set_type(pos, prog_type);
1723		bpf_program__set_expected_attach_type(pos, expected_attach_type);
1724	}
1725
1726	qsort(map_replace, old_map_fds, sizeof(*map_replace),
1727	      map_replace_compar);
1728
1729	/* After the sort maps by name will be first on the list, because they
1730	 * have idx == -1.  Resolve them.
1731	 */
1732	j = 0;
1733	while (j < old_map_fds && map_replace[j].name) {
1734		i = 0;
1735		bpf_object__for_each_map(map, obj) {
1736			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1737				map_replace[j].idx = i;
1738				break;
1739			}
1740			i++;
1741		}
1742		if (map_replace[j].idx == -1) {
1743			p_err("unable to find map '%s'", map_replace[j].name);
1744			goto err_close_obj;
1745		}
1746		j++;
1747	}
1748	/* Resort if any names were resolved */
1749	if (j)
1750		qsort(map_replace, old_map_fds, sizeof(*map_replace),
1751		      map_replace_compar);
1752
1753	/* Set ifindex and name reuse */
1754	j = 0;
1755	idx = 0;
1756	bpf_object__for_each_map(map, obj) {
1757		if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1758			bpf_map__set_ifindex(map, offload_ifindex);
1759
1760		if (j < old_map_fds && idx == map_replace[j].idx) {
1761			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1762			if (err) {
1763				p_err("unable to set up map reuse: %d", err);
1764				goto err_close_obj;
1765			}
1766
1767			/* Next reuse wants to apply to the same map */
1768			if (j < old_map_fds && map_replace[j].idx == idx) {
1769				p_err("replacement for map idx %d specified more than once",
1770				      idx);
1771				goto err_close_obj;
1772			}
1773		}
1774
1775		idx++;
1776	}
1777	if (j < old_map_fds) {
1778		p_err("map idx '%d' not used", map_replace[j].idx);
1779		goto err_close_obj;
1780	}
1781
1782	err = bpf_object__load(obj);
1783	if (err) {
1784		p_err("failed to load object file");
1785		goto err_close_obj;
1786	}
1787
1788	if (first_prog_only)
1789		err = mount_bpffs_for_file(pinfile);
1790	else
1791		err = create_and_mount_bpffs_dir(pinfile);
1792	if (err)
1793		goto err_close_obj;
1794
1795	if (first_prog_only) {
1796		prog = bpf_object__next_program(obj, NULL);
1797		if (!prog) {
1798			p_err("object file doesn't contain any bpf program");
1799			goto err_close_obj;
1800		}
1801
1802		if (auto_attach)
1803			err = auto_attach_program(prog, pinfile);
1804		else
1805			err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1806		if (err) {
1807			p_err("failed to pin program %s",
1808			      bpf_program__section_name(prog));
1809			goto err_close_obj;
1810		}
1811	} else {
1812		if (auto_attach)
1813			err = auto_attach_programs(obj, pinfile);
1814		else
1815			err = bpf_object__pin_programs(obj, pinfile);
1816		if (err) {
1817			p_err("failed to pin all programs");
1818			goto err_close_obj;
1819		}
1820	}
1821
1822	if (pinmaps) {
1823		err = create_and_mount_bpffs_dir(pinmaps);
1824		if (err)
1825			goto err_unpin;
1826
1827		err = bpf_object__pin_maps(obj, pinmaps);
1828		if (err) {
1829			p_err("failed to pin all maps");
1830			goto err_unpin;
1831		}
1832	}
1833
1834	if (json_output)
1835		jsonw_null(json_wtr);
1836
1837	bpf_object__close(obj);
1838	for (i = 0; i < old_map_fds; i++)
1839		close(map_replace[i].fd);
1840	free(map_replace);
1841
1842	return 0;
1843
1844err_unpin:
1845	if (first_prog_only)
1846		unlink(pinfile);
1847	else
1848		bpf_object__unpin_programs(obj, pinfile);
1849err_close_obj:
1850	bpf_object__close(obj);
1851err_free_reuse_maps:
1852	for (i = 0; i < old_map_fds; i++)
1853		close(map_replace[i].fd);
1854	free(map_replace);
1855	return -1;
1856}
1857
1858static int count_open_fds(void)
1859{
1860	DIR *dp = opendir("/proc/self/fd");
1861	struct dirent *de;
1862	int cnt = -3;
1863
1864	if (!dp)
1865		return -1;
1866
1867	while ((de = readdir(dp)))
1868		cnt++;
1869
1870	closedir(dp);
1871	return cnt;
1872}
1873
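/* Run the generated loader program through bpf_load_and_run(), printing
 * the verifier log on failure (or when verifier_logs is set) and warning
 * about file descriptors leaked by the loader.
 */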
1874static int try_loader(struct gen_loader_opts *gen)
1875{
1876	struct bpf_load_and_run_opts opts = {};
1877	struct bpf_loader_ctx *ctx;
1878	int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
1879					     sizeof(struct bpf_prog_desc));
1880	int log_buf_sz = (1u << 24) - 1;
1881	int err, fds_before, fd_delta;
1882	char *log_buf = NULL;
1883
1884	ctx = alloca(ctx_sz);
1885	memset(ctx, 0, ctx_sz);
1886	ctx->sz = ctx_sz;
1887	if (verifier_logs) {
1888		ctx->log_level = 1 + 2 + 4;
1889		ctx->log_size = log_buf_sz;
1890		log_buf = malloc(log_buf_sz);
1891		if (!log_buf)
1892			return -ENOMEM;
1893		ctx->log_buf = (long) log_buf;
1894	}
1895	opts.ctx = ctx;
1896	opts.data = gen->data;
1897	opts.data_sz = gen->data_sz;
1898	opts.insns = gen->insns;
1899	opts.insns_sz = gen->insns_sz;
1900	fds_before = count_open_fds();
1901	err = bpf_load_and_run(&opts);
1902	fd_delta = count_open_fds() - fds_before;
1903	if (err < 0 || verifier_logs) {
1904		fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
1905		if (fd_delta && err < 0)
1906			fprintf(stderr, "loader prog leaked %d FDs\n",
1907				fd_delta);
1908	}
1909	free(log_buf);
1910	return err;
1911}
1912
1913static int do_loader(int argc, char **argv)
1914{
1915	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
1916	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
1917	struct bpf_object *obj;
1918	const char *file;
1919	int err = 0;
1920
1921	if (!REQ_ARGS(1))
1922		return -1;
1923	file = GET_ARG();
1924
1925	if (verifier_logs)
1926		/* log_level1 + log_level2 + stats, but not stable UAPI */
1927		open_opts.kernel_log_level = 1 + 2 + 4;
1928
1929	obj = bpf_object__open_file(file, &open_opts);
1930	if (!obj) {
1931		p_err("failed to open object file");
1932		goto err_close_obj;
1933	}
1934
1935	err = bpf_object__gen_loader(obj, &gen);
1936	if (err)
1937		goto err_close_obj;
1938
1939	err = bpf_object__load(obj);
1940	if (err) {
1941		p_err("failed to load object file");
1942		goto err_close_obj;
1943	}
1944
1945	if (verifier_logs) {
1946		struct dump_data dd = {};
1947
1948		kernel_syms_load(&dd);
1949		dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
1950		kernel_syms_destroy(&dd);
1951	}
1952	err = try_loader(&gen);
1953err_close_obj:
1954	bpf_object__close(obj);
1955	return err;
1956}
1957
1958static int do_load(int argc, char **argv)
1959{
1960	if (use_loader)
1961		return do_loader(argc, argv);
1962	return load_with_options(argc, argv, true);
1963}
1964
1965static int do_loadall(int argc, char **argv)
1966{
1967	return load_with_options(argc, argv, false);
1968}
1969
1970#ifdef BPFTOOL_WITHOUT_SKELETONS
1971
1972static int do_profile(int argc, char **argv)
1973{
1974	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1975	return 0;
1976}
1977
1978#else /* BPFTOOL_WITHOUT_SKELETONS */
1979
1980#include "profiler.skel.h"
1981
1982struct profile_metric {
1983	const char *name;
1984	struct bpf_perf_event_value val;
1985	struct perf_event_attr attr;
1986	bool selected;
1987
1988	/* calculate ratios like instructions per cycle */
1989	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1990	const char *ratio_desc;
1991	const float ratio_mul;
1992} metrics[] = {
1993	{
1994		.name = "cycles",
1995		.attr = {
1996			.type = PERF_TYPE_HARDWARE,
1997			.config = PERF_COUNT_HW_CPU_CYCLES,
1998			.exclude_user = 1,
1999		},
2000	},
2001	{
2002		.name = "instructions",
2003		.attr = {
2004			.type = PERF_TYPE_HARDWARE,
2005			.config = PERF_COUNT_HW_INSTRUCTIONS,
2006			.exclude_user = 1,
2007		},
2008		.ratio_metric = 1,
2009		.ratio_desc = "insns per cycle",
2010		.ratio_mul = 1.0,
2011	},
2012	{
2013		.name = "l1d_loads",
2014		.attr = {
2015			.type = PERF_TYPE_HW_CACHE,
2016			.config =
2017				PERF_COUNT_HW_CACHE_L1D |
2018				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2019				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
2020			.exclude_user = 1,
2021		},
2022	},
2023	{
2024		.name = "llc_misses",
2025		.attr = {
2026			.type = PERF_TYPE_HW_CACHE,
2027			.config =
2028				PERF_COUNT_HW_CACHE_LL |
2029				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2030				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2031			.exclude_user = 1
2032		},
2033		.ratio_metric = 2,
2034		.ratio_desc = "LLC misses per million insns",
2035		.ratio_mul = 1e6,
2036	},
2037	{
2038		.name = "itlb_misses",
2039		.attr = {
2040			.type = PERF_TYPE_HW_CACHE,
2041			.config =
2042				PERF_COUNT_HW_CACHE_ITLB |
2043				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2044				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2045			.exclude_user = 1
2046		},
2047		.ratio_metric = 2,
2048		.ratio_desc = "itlb misses per million insns",
2049		.ratio_mul = 1e6,
2050	},
2051	{
2052		.name = "dtlb_misses",
2053		.attr = {
2054			.type = PERF_TYPE_HW_CACHE,
2055			.config =
2056				PERF_COUNT_HW_CACHE_DTLB |
2057				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2058				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2059			.exclude_user = 1
2060		},
2061		.ratio_metric = 2,
2062		.ratio_desc = "dtlb misses per million insns",
2063		.ratio_mul = 1e6,
2064	},
2065};
2066
2067static __u64 profile_total_count;
2068
2069#define MAX_NUM_PROFILE_METRICS 4
2070
2071static int profile_parse_metrics(int argc, char **argv)
2072{
2073	unsigned int metric_cnt;
2074	int selected_cnt = 0;
2075	unsigned int i;
2076
2077	metric_cnt = ARRAY_SIZE(metrics);
2078
2079	while (argc > 0) {
2080		for (i = 0; i < metric_cnt; i++) {
2081			if (is_prefix(argv[0], metrics[i].name)) {
2082				if (!metrics[i].selected)
2083					selected_cnt++;
2084				metrics[i].selected = true;
2085				break;
2086			}
2087		}
2088		if (i == metric_cnt) {
2089			p_err("unknown metric %s", argv[0]);
2090			return -1;
2091		}
2092		NEXT_ARG();
2093	}
2094	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
2095		p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
2096		      selected_cnt, MAX_NUM_PROFILE_METRICS);
2097		return -1;
2098	}
2099	return selected_cnt;
2100}
2101
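/* Accumulate the per-CPU run counts and perf event readings collected by
 * the profiler skeleton into profile_total_count and metrics[].val.
 */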
2102static void profile_read_values(struct profiler_bpf *obj)
2103{
2104	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
2105	int reading_map_fd, count_map_fd;
2106	__u64 counts[num_cpu];
2107	__u32 key = 0;
2108	int err;
2109
2110	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
2111	count_map_fd = bpf_map__fd(obj->maps.counts);
2112	if (reading_map_fd < 0 || count_map_fd < 0) {
2113		p_err("failed to get fd for map");
2114		return;
2115	}
2116
2117	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
2118	if (err) {
2119		p_err("failed to read count_map: %s", strerror(errno));
2120		return;
2121	}
2122
2123	profile_total_count = 0;
2124	for (cpu = 0; cpu < num_cpu; cpu++)
2125		profile_total_count += counts[cpu];
2126
2127	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2128		struct bpf_perf_event_value values[num_cpu];
2129
2130		if (!metrics[m].selected)
2131			continue;
2132
2133		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
2134		if (err) {
2135			p_err("failed to read reading_map: %s",
2136			      strerror(errno));
2137			return;
2138		}
2139		for (cpu = 0; cpu < num_cpu; cpu++) {
2140			metrics[m].val.counter += values[cpu].counter;
2141			metrics[m].val.enabled += values[cpu].enabled;
2142			metrics[m].val.running += values[cpu].running;
2143		}
2144		key++;
2145	}
2146}
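/* Note: accum_readings holds one per-CPU bpf_perf_event_value entry per
 * *selected* metric, which is why "key" above only advances for selected
 * metrics. Counters are summed across CPUs; "enabled"/"running" are kept so
 * the plain output can show how much of the time the event was actually
 * counting (perf event multiplexing).
 */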
2147
2148static void profile_print_readings_json(void)
2149{
2150	__u32 m;
2151
2152	jsonw_start_array(json_wtr);
2153	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2154		if (!metrics[m].selected)
2155			continue;
2156		jsonw_start_object(json_wtr);
2157		jsonw_string_field(json_wtr, "metric", metrics[m].name);
2158		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
2159		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
2160		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
2161		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
2162
2163		jsonw_end_object(json_wtr);
2164	}
2165	jsonw_end_array(json_wtr);
2166}
2167
2168static void profile_print_readings_plain(void)
2169{
2170	__u32 m;
2171
2172	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
2173	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2174		struct bpf_perf_event_value *val = &metrics[m].val;
2175		int r;
2176
2177		if (!metrics[m].selected)
2178			continue;
2179		printf("%18llu %-20s", val->counter, metrics[m].name);
2180
2181		r = metrics[m].ratio_metric - 1;
2182		if (r >= 0 && metrics[r].selected &&
2183		    metrics[r].val.counter > 0) {
2184			printf("# %8.2f %-30s",
2185			       val->counter * metrics[m].ratio_mul /
2186			       metrics[r].val.counter,
2187			       metrics[m].ratio_desc);
2188		} else {
2189			printf("%-41s", "");
2190		}
2191
2192		if (val->enabled > val->running)
2193			printf("(%4.2f%%)",
2194			       val->running * 100.0 / val->enabled);
2195		printf("\n");
2196	}
2197}
2198
2199static void profile_print_readings(void)
2200{
2201	if (json_output)
2202		profile_print_readings_json();
2203	else
2204		profile_print_readings_plain();
2205}
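/* Illustrative plain output for "cycles" and "instructions" (numbers are
 * made up, spacing approximate):
 *
 *	          1128 run_cnt
 *	       4912430 cycles
 *	       6342286 instructions   #     1.29 insns per cycle
 */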
2206
2207static char *profile_target_name(int tgt_fd)
2208{
2209	struct bpf_func_info func_info;
2210	struct bpf_prog_info info = {};
2211	__u32 info_len = sizeof(info);
2212	const struct btf_type *t;
2213	__u32 func_info_rec_size;
2214	struct btf *btf = NULL;
2215	char *name = NULL;
2216	int err;
2217
2218	err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
2219	if (err) {
2220		p_err("failed to get info for prog FD %d", tgt_fd);
2221		goto out;
2222	}
2223
2224	if (info.btf_id == 0) {
2225		p_err("prog FD %d doesn't have valid btf", tgt_fd);
2226		goto out;
2227	}
2228
2229	func_info_rec_size = info.func_info_rec_size;
2230	if (info.nr_func_info == 0) {
2231		p_err("found 0 func_info for prog FD %d", tgt_fd);
2232		goto out;
2233	}
2234
2235	memset(&info, 0, sizeof(info));
2236	info.nr_func_info = 1;
2237	info.func_info_rec_size = func_info_rec_size;
2238	info.func_info = ptr_to_u64(&func_info);
2239
2240	err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
2241	if (err) {
2242		p_err("failed to get func_info for prog FD %d", tgt_fd);
2243		goto out;
2244	}
2245
2246	btf = btf__load_from_kernel_by_id(info.btf_id);
2247	if (!btf) {
2248		p_err("failed to load btf for prog FD %d", tgt_fd);
2249		goto out;
2250	}
2251
2252	t = btf__type_by_id(btf, func_info.type_id);
2253	if (!t) {
2254		p_err("btf %d doesn't have type %d",
2255		      info.btf_id, func_info.type_id);
2256		goto out;
2257	}
2258	name = strdup(btf__name_by_offset(btf, t->name_off));
2259out:
2260	btf__free(btf);
2261	return name;
2262}
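/* The string returned above is the BTF name of the target program's first
 * function record; do_profile() passes it to bpf_program__set_attach_target()
 * so the profiler skeleton's programs hook that function.
 */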
2263
2264static struct profiler_bpf *profile_obj;
2265static int profile_tgt_fd = -1;
2266static char *profile_tgt_name;
2267static int *profile_perf_events;
2268static int profile_perf_event_cnt;
2269
2270static void profile_close_perf_events(struct profiler_bpf *obj)
2271{
2272	int i;
2273
2274	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
2275		close(profile_perf_events[i]);
2276
2277	free(profile_perf_events);
2278	profile_perf_event_cnt = 0;
2279}
2280
2281static int profile_open_perf_event(int mid, int cpu, int map_fd)
2282{
2283	int pmu_fd;
2284
2285	pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
2286			 -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
2287	if (pmu_fd < 0) {
2288		if (errno == ENODEV) {
2289			p_info("cpu %d may be offline, skip %s profiling.",
2290				cpu, metrics[mid].name);
2291			profile_perf_event_cnt++;
2292			return 0;
2293		}
2294		return -1;
2295	}
2296
2297	if (bpf_map_update_elem(map_fd,
2298				&profile_perf_event_cnt,
2299				&pmu_fd, BPF_ANY) ||
2300	    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
2301		close(pmu_fd);
2302		return -1;
2303	}
2304
2305	profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
2306	return 0;
2307}
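/* The "events" map slot index mirrors profile_perf_event_cnt, so entries are
 * laid out metric-major, CPU-minor, matching the open loop below. Offline
 * CPUs (ENODEV) still consume a slot so the indexing stays aligned.
 */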
2308
2309static int profile_open_perf_events(struct profiler_bpf *obj)
2310{
2311	unsigned int cpu, m;
2312	int map_fd;
2313
2314	profile_perf_events = calloc(
2315		obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
2316	if (!profile_perf_events) {
2317		p_err("failed to allocate memory for perf_event array: %s",
2318		      strerror(errno));
2319		return -1;
2320	}
2321	map_fd = bpf_map__fd(obj->maps.events);
2322	if (map_fd < 0) {
2323		p_err("failed to get fd for events map");
2324		return -1;
2325	}
2326
2327	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2328		if (!metrics[m].selected)
2329			continue;
2330		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
2331			if (profile_open_perf_event(m, cpu, map_fd)) {
2332				p_err("failed to create event %s on cpu %d",
2333				      metrics[m].name, cpu);
2334				return -1;
2335			}
2336		}
2337	}
2338	return 0;
2339}
2340
2341static void profile_print_and_cleanup(void)
2342{
2343	profile_close_perf_events(profile_obj);
2344	profile_read_values(profile_obj);
2345	profile_print_readings();
2346	profiler_bpf__destroy(profile_obj);
2347
2348	close(profile_tgt_fd);
2349	free(profile_tgt_name);
2350}
2351
2352static void int_exit(int signo)
2353{
2354	profile_print_and_cleanup();
2355	exit(0);
2356}
2357
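/* Overall flow: resolve the target program, parse the optional duration and
 * the metric list, size the profiler skeleton's maps from num_metric and
 * num_cpu, point its programs at the target function, then load, open the
 * perf events, attach, and sleep. SIGINT (or the duration expiring) triggers
 * profile_print_and_cleanup().
 */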
2358static int do_profile(int argc, char **argv)
2359{
2360	int num_metric, num_cpu, err = -1;
2361	struct bpf_program *prog;
2362	unsigned long duration;
2363	char *endptr;
2364
2365	/* we at least need two args for the prog and one metric */
2366	if (!REQ_ARGS(3))
2367		return -EINVAL;
2368
2369	/* parse target fd */
2370	profile_tgt_fd = prog_parse_fd(&argc, &argv);
2371	if (profile_tgt_fd < 0) {
2372		p_err("failed to parse fd");
2373		return -1;
2374	}
2375
2376	/* parse profiling optional duration */
2377	if (argc > 2 && is_prefix(argv[0], "duration")) {
2378		NEXT_ARG();
2379		duration = strtoul(*argv, &endptr, 0);
2380		if (*endptr)
2381			usage();
2382		NEXT_ARG();
2383	} else {
2384		duration = UINT_MAX;
2385	}
2386
2387	num_metric = profile_parse_metrics(argc, argv);
2388	if (num_metric <= 0)
2389		goto out;
2390
2391	num_cpu = libbpf_num_possible_cpus();
2392	if (num_cpu <= 0) {
2393		p_err("failed to identify number of CPUs");
2394		goto out;
2395	}
2396
2397	profile_obj = profiler_bpf__open();
2398	if (!profile_obj) {
2399		p_err("failed to open and/or load BPF object");
2400		goto out;
2401	}
2402
2403	profile_obj->rodata->num_cpu = num_cpu;
2404	profile_obj->rodata->num_metric = num_metric;
2405
2406	/* adjust map sizes */
2407	bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
2408	bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
2409	bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
2410	bpf_map__set_max_entries(profile_obj->maps.counts, 1);
2411
2412	/* change target name */
2413	profile_tgt_name = profile_target_name(profile_tgt_fd);
2414	if (!profile_tgt_name)
2415		goto out;
2416
2417	bpf_object__for_each_program(prog, profile_obj->obj) {
2418		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2419						     profile_tgt_name);
2420		if (err) {
2421			p_err("failed to set attach target\n");
2422			goto out;
2423		}
2424	}
2425
2426	set_max_rlimit();
2427	err = profiler_bpf__load(profile_obj);
2428	if (err) {
2429		p_err("failed to load profile_obj");
2430		goto out;
2431	}
2432
2433	err = profile_open_perf_events(profile_obj);
2434	if (err)
2435		goto out;
2436
2437	err = profiler_bpf__attach(profile_obj);
2438	if (err) {
2439		p_err("failed to attach profile_obj");
2440		goto out;
2441	}
2442	signal(SIGINT, int_exit);
2443
2444	sleep(duration);
2445	profile_print_and_cleanup();
2446	return 0;
2447
2448out:
2449	profile_close_perf_events(profile_obj);
2450	if (profile_obj)
2451		profiler_bpf__destroy(profile_obj);
2452	close(profile_tgt_fd);
2453	free(profile_tgt_name);
2454	return err;
2455}
2456
2457#endif /* BPFTOOL_WITHOUT_SKELETONS */
2458
2459static int do_help(int argc, char **argv)
2460{
2461	if (json_output) {
2462		jsonw_null(json_wtr);
2463		return 0;
2464	}
2465
2466	fprintf(stderr,
2467		"Usage: %1$s %2$s { show | list } [PROG]\n"
2468		"       %1$s %2$s dump xlated PROG [{ file FILE | [opcodes] [linum] [visual] }]\n"
2469		"       %1$s %2$s dump jited  PROG [{ file FILE | [opcodes] [linum] }]\n"
2470		"       %1$s %2$s pin   PROG FILE\n"
2471		"       %1$s %2$s { load | loadall } OBJ  PATH \\\n"
2472		"                         [type TYPE] [{ offload_dev | xdpmeta_dev } NAME] \\\n"
2473		"                         [map { idx IDX | name NAME } MAP]\\\n"
2474		"                         [pinmaps MAP_DIR]\n"
2475		"                         [autoattach]\n"
2476		"       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2477		"       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2478		"       %1$s %2$s run PROG \\\n"
2479		"                         data_in FILE \\\n"
2480		"                         [data_out FILE [data_size_out L]] \\\n"
2481		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2482		"                         [repeat N]\n"
2483		"       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2484		"       %1$s %2$s tracelog\n"
2485		"       %1$s %2$s help\n"
2486		"\n"
2487		"       " HELP_SPEC_MAP "\n"
2488		"       " HELP_SPEC_PROGRAM "\n"
2489		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2490		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2491		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2492		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2493		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2494		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2495		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2496		"                 cgroup/connect_unix | cgroup/getpeername4 | cgroup/getpeername6 |\n"
2497		"                 cgroup/getpeername_unix | cgroup/getsockname4 | cgroup/getsockname6 |\n"
2498		"                 cgroup/getsockname_unix | cgroup/sendmsg4 | cgroup/sendmsg6 |\n"
2499		"                 cgroup/sendmsg_unix | cgroup/recvmsg4 | cgroup/recvmsg6 | cgroup/recvmsg_unix |\n"
2500		"                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
2501		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2502		"       ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
2503		"                        sk_skb_stream_parser | flow_dissector }\n"
2504		"       METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
2505		"       " HELP_SPEC_OPTIONS " |\n"
2506		"                    {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
2507		"                    {-L|--use-loader} }\n"
2508		"",
2509		bin_name, argv[-2]);
2510
2511	return 0;
2512}
2513
2514static const struct cmd cmds[] = {
2515	{ "show",	do_show },
2516	{ "list",	do_show },
2517	{ "help",	do_help },
2518	{ "dump",	do_dump },
2519	{ "pin",	do_pin },
2520	{ "load",	do_load },
2521	{ "loadall",	do_loadall },
2522	{ "attach",	do_attach },
2523	{ "detach",	do_detach },
2524	{ "tracelog",	do_tracelog },
2525	{ "run",	do_run },
2526	{ "profile",	do_profile },
2527	{ 0 }
2528};
2529
2530int do_prog(int argc, char **argv)
2531{
2532	return cmd_select(cmds, argc, argv, do_help);
2533}
v5.9
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3
   4#define _GNU_SOURCE
   5#include <errno.h>
   6#include <fcntl.h>
   7#include <signal.h>
   8#include <stdarg.h>
   9#include <stdio.h>
  10#include <stdlib.h>
  11#include <string.h>
  12#include <time.h>
  13#include <unistd.h>
  14#include <net/if.h>
  15#include <sys/ioctl.h>
  16#include <sys/types.h>
  17#include <sys/stat.h>
  18#include <sys/syscall.h>
  19
  20#include <linux/err.h>
  21#include <linux/perf_event.h>
  22#include <linux/sizes.h>
  23
  24#include <bpf/bpf.h>
  25#include <bpf/btf.h>
  26#include <bpf/libbpf.h>
  27
  28#include "cfg.h"
  29#include "main.h"
  30#include "xlated_dumper.h"
  31
  32const char * const prog_type_name[] = {
  33	[BPF_PROG_TYPE_UNSPEC]			= "unspec",
  34	[BPF_PROG_TYPE_SOCKET_FILTER]		= "socket_filter",
  35	[BPF_PROG_TYPE_KPROBE]			= "kprobe",
  36	[BPF_PROG_TYPE_SCHED_CLS]		= "sched_cls",
  37	[BPF_PROG_TYPE_SCHED_ACT]		= "sched_act",
  38	[BPF_PROG_TYPE_TRACEPOINT]		= "tracepoint",
  39	[BPF_PROG_TYPE_XDP]			= "xdp",
  40	[BPF_PROG_TYPE_PERF_EVENT]		= "perf_event",
  41	[BPF_PROG_TYPE_CGROUP_SKB]		= "cgroup_skb",
  42	[BPF_PROG_TYPE_CGROUP_SOCK]		= "cgroup_sock",
  43	[BPF_PROG_TYPE_LWT_IN]			= "lwt_in",
  44	[BPF_PROG_TYPE_LWT_OUT]			= "lwt_out",
  45	[BPF_PROG_TYPE_LWT_XMIT]		= "lwt_xmit",
  46	[BPF_PROG_TYPE_SOCK_OPS]		= "sock_ops",
  47	[BPF_PROG_TYPE_SK_SKB]			= "sk_skb",
  48	[BPF_PROG_TYPE_CGROUP_DEVICE]		= "cgroup_device",
  49	[BPF_PROG_TYPE_SK_MSG]			= "sk_msg",
  50	[BPF_PROG_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
  51	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR]	= "cgroup_sock_addr",
  52	[BPF_PROG_TYPE_LWT_SEG6LOCAL]		= "lwt_seg6local",
  53	[BPF_PROG_TYPE_LIRC_MODE2]		= "lirc_mode2",
  54	[BPF_PROG_TYPE_SK_REUSEPORT]		= "sk_reuseport",
  55	[BPF_PROG_TYPE_FLOW_DISSECTOR]		= "flow_dissector",
  56	[BPF_PROG_TYPE_CGROUP_SYSCTL]		= "cgroup_sysctl",
  57	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE]	= "raw_tracepoint_writable",
  58	[BPF_PROG_TYPE_CGROUP_SOCKOPT]		= "cgroup_sockopt",
  59	[BPF_PROG_TYPE_TRACING]			= "tracing",
  60	[BPF_PROG_TYPE_STRUCT_OPS]		= "struct_ops",
  61	[BPF_PROG_TYPE_EXT]			= "ext",
  62	[BPF_PROG_TYPE_LSM]			= "lsm",
  63	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
  64};
  65
  66const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
  67
  68enum dump_mode {
  69	DUMP_JITED,
  70	DUMP_XLATED,
  71};
  72
  73static const char * const attach_type_strings[] = {
  74	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
  75	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
  76	[BPF_SK_MSG_VERDICT] = "msg_verdict",
  77	[BPF_FLOW_DISSECTOR] = "flow_dissector",
  78	[__MAX_BPF_ATTACH_TYPE] = NULL,
  79};
  80
  81static enum bpf_attach_type parse_attach_type(const char *str)
  82{
  83	enum bpf_attach_type type;
  84
  85	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
  86		if (attach_type_strings[type] &&
  87		    is_prefix(str, attach_type_strings[type]))
  88			return type;
  89	}
  90
  91	return __MAX_BPF_ATTACH_TYPE;
  92}
  93
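/* load_time from bpf_prog_info is nanoseconds since boot; print_boot_time()
 * converts it to wallclock time as (CLOCK_REALTIME - CLOCK_BOOTTIME) +
 * load_time before formatting, falling back to plain seconds if the clocks
 * or localtime_r() are unavailable.
 */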
  94static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
  95{
  96	struct timespec real_time_ts, boot_time_ts;
  97	time_t wallclock_secs;
  98	struct tm load_tm;
  99
 100	buf[--size] = '\0';
 101
 102	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
 103	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
 104		perror("Can't read clocks");
 105		snprintf(buf, size, "%llu", nsecs / 1000000000);
 106		return;
 107	}
 108
 109	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
 110		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
 111		1000000000;
 112
 113
 114	if (!localtime_r(&wallclock_secs, &load_tm)) {
 115		snprintf(buf, size, "%llu", nsecs / 1000000000);
 116		return;
 117	}
 118
 119	if (json_output)
 120		strftime(buf, size, "%s", &load_tm);
 121	else
 122		strftime(buf, size, "%FT%T%z", &load_tm);
 123}
 124
 125static void show_prog_maps(int fd, __u32 num_maps)
 126{
 127	struct bpf_prog_info info = {};
 128	__u32 len = sizeof(info);
 129	__u32 map_ids[num_maps];
 130	unsigned int i;
 131	int err;
 132
 133	info.nr_map_ids = num_maps;
 134	info.map_ids = ptr_to_u64(map_ids);
 135
 136	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 137	if (err || !info.nr_map_ids)
 138		return;
 139
 140	if (json_output) {
 141		jsonw_name(json_wtr, "map_ids");
 142		jsonw_start_array(json_wtr);
 143		for (i = 0; i < info.nr_map_ids; i++)
 144			jsonw_uint(json_wtr, map_ids[i]);
 145		jsonw_end_array(json_wtr);
 146	} else {
 147		printf("  map_ids ");
 148		for (i = 0; i < info.nr_map_ids; i++)
 149			printf("%u%s", map_ids[i],
 150			       i == info.nr_map_ids - 1 ? "" : ",");
 151	}
 152}
 153
 154static void print_prog_header_json(struct bpf_prog_info *info)
 155{
 156	jsonw_uint_field(json_wtr, "id", info->id);
 157	if (info->type < ARRAY_SIZE(prog_type_name))
 158		jsonw_string_field(json_wtr, "type",
 159				   prog_type_name[info->type]);
 160	else
 161		jsonw_uint_field(json_wtr, "type", info->type);
 162
 163	if (*info->name)
 164		jsonw_string_field(json_wtr, "name", info->name);
 165
 166	jsonw_name(json_wtr, "tag");
 167	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
 168		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
 169		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 170
 171	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
 172	if (info->run_time_ns) {
 173		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
 174		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
 175	}
 176}
 177
 178static void print_prog_json(struct bpf_prog_info *info, int fd)
 179{
 180	char *memlock;
 181
 182	jsonw_start_object(json_wtr);
 183	print_prog_header_json(info);
 184	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
 185
 186	if (info->load_time) {
 187		char buf[32];
 188
 189		print_boot_time(info->load_time, buf, sizeof(buf));
 190
 191		/* Piggy back on load_time, since 0 uid is a valid one */
 192		jsonw_name(json_wtr, "loaded_at");
 193		jsonw_printf(json_wtr, "%s", buf);
 194		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
 195	}
 196
 197	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
 198
 199	if (info->jited_prog_len) {
 200		jsonw_bool_field(json_wtr, "jited", true);
 201		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
 202	} else {
 203		jsonw_bool_field(json_wtr, "jited", false);
 204	}
 205
 206	memlock = get_fdinfo(fd, "memlock");
 207	if (memlock)
 208		jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
 209	free(memlock);
 210
 211	if (info->nr_map_ids)
 212		show_prog_maps(fd, info->nr_map_ids);
 213
 214	if (info->btf_id)
 215		jsonw_int_field(json_wtr, "btf_id", info->btf_id);
 216
 217	if (!hash_empty(prog_table.table)) {
 218		struct pinned_obj *obj;
 219
 220		jsonw_name(json_wtr, "pinned");
 221		jsonw_start_array(json_wtr);
 222		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
 223			if (obj->id == info->id)
 224				jsonw_string(json_wtr, obj->path);
 225		}
 226		jsonw_end_array(json_wtr);
 227	}
 228
 229	emit_obj_refs_json(&refs_table, info->id, json_wtr);
 230
 231	jsonw_end_object(json_wtr);
 232}
 233
 234static void print_prog_header_plain(struct bpf_prog_info *info)
 235{
 236	printf("%u: ", info->id);
 237	if (info->type < ARRAY_SIZE(prog_type_name))
 238		printf("%s  ", prog_type_name[info->type]);
 239	else
 240		printf("type %u  ", info->type);
 241
 242	if (*info->name)
 243		printf("name %s  ", info->name);
 244
 245	printf("tag ");
 246	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
 247	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
 248	printf("%s", info->gpl_compatible ? "  gpl" : "");
 249	if (info->run_time_ns)
 250		printf(" run_time_ns %lld run_cnt %lld",
 251		       info->run_time_ns, info->run_cnt);
 252	printf("\n");
 253}
 254
 255static void print_prog_plain(struct bpf_prog_info *info, int fd)
 256{
 257	char *memlock;
 258
 259	print_prog_header_plain(info);
 260
 261	if (info->load_time) {
 262		char buf[32];
 263
 264		print_boot_time(info->load_time, buf, sizeof(buf));
 265
 266		/* Piggy back on load_time, since 0 uid is a valid one */
 267		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
 268	}
 269
 270	printf("\txlated %uB", info->xlated_prog_len);
 271
 272	if (info->jited_prog_len)
 273		printf("  jited %uB", info->jited_prog_len);
 274	else
 275		printf("  not jited");
 276
 277	memlock = get_fdinfo(fd, "memlock");
 278	if (memlock)
 279		printf("  memlock %sB", memlock);
 280	free(memlock);
 281
 282	if (info->nr_map_ids)
 283		show_prog_maps(fd, info->nr_map_ids);
 284
 285	if (!hash_empty(prog_table.table)) {
 286		struct pinned_obj *obj;
 287
 288		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
 289			if (obj->id == info->id)
 290				printf("\n\tpinned %s", obj->path);
 291		}
 292	}
 293
 294	if (info->btf_id)
 295		printf("\n\tbtf_id %d", info->btf_id);
 296
 297	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
 298
 299	printf("\n");
 300}
 301
 302static int show_prog(int fd)
 303{
 304	struct bpf_prog_info info = {};
 305	__u32 len = sizeof(info);
 306	int err;
 307
 308	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 309	if (err) {
 310		p_err("can't get prog info: %s", strerror(errno));
 311		return -1;
 312	}
 313
 314	if (json_output)
 315		print_prog_json(&info, fd);
 316	else
 317		print_prog_plain(&info, fd);
 318
 319	return 0;
 320}
 321
 322static int do_show_subset(int argc, char **argv)
 323{
 324	int *fds = NULL;
 325	int nb_fds, i;
 326	int err = -1;
 327
 328	fds = malloc(sizeof(int));
 329	if (!fds) {
 330		p_err("mem alloc failed");
 331		return -1;
 332	}
 333	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 334	if (nb_fds < 1)
 335		goto exit_free;
 336
 337	if (json_output && nb_fds > 1)
 338		jsonw_start_array(json_wtr);	/* root array */
 339	for (i = 0; i < nb_fds; i++) {
 340		err = show_prog(fds[i]);
 341		if (err) {
 342			for (; i < nb_fds; i++)
 343				close(fds[i]);
 344			break;
 345		}
 346		close(fds[i]);
 347	}
 348	if (json_output && nb_fds > 1)
 349		jsonw_end_array(json_wtr);	/* root array */
 350
 351exit_free:
 352	free(fds);
 353	return err;
 354}
 355
 356static int do_show(int argc, char **argv)
 357{
 358	__u32 id = 0;
 359	int err;
 360	int fd;
 361
 362	if (show_pinned)
 363		build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
 364	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
 365
 366	if (argc == 2)
 367		return do_show_subset(argc, argv);
 368
 369	if (argc)
 370		return BAD_ARG();
 371
 372	if (json_output)
 373		jsonw_start_array(json_wtr);
 374	while (true) {
 375		err = bpf_prog_get_next_id(id, &id);
 376		if (err) {
 377			if (errno == ENOENT) {
 378				err = 0;
 379				break;
 380			}
 381			p_err("can't get next program: %s%s", strerror(errno),
 382			      errno == EINVAL ? " -- kernel too old?" : "");
 383			err = -1;
 384			break;
 385		}
 386
 387		fd = bpf_prog_get_fd_by_id(id);
 388		if (fd < 0) {
 389			if (errno == ENOENT)
 390				continue;
 391			p_err("can't get prog by id (%u): %s",
 392			      id, strerror(errno));
 393			err = -1;
 394			break;
 395		}
 396
 397		err = show_prog(fd);
 398		close(fd);
 399		if (err)
 400			break;
 401	}
 402
 403	if (json_output)
 404		jsonw_end_array(json_wtr);
 405
 406	delete_obj_refs_table(&refs_table);
 407
 408	return err;
 409}
 410
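/* prog_dump() writes either the JIT-ed image or the translated (xlated)
 * instructions, depending on "mode": to a raw file when "filepath" is set,
 * otherwise disassembled/pretty-printed to stdout (optionally with opcodes,
 * line information, or a CFG "visual" dump for xlated programs).
 */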
 411static int
 412prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 413	  char *filepath, bool opcodes, bool visual, bool linum)
 414{
 415	struct bpf_prog_linfo *prog_linfo = NULL;
 416	const char *disasm_opt = NULL;
 417	struct dump_data dd = {};
 418	void *func_info = NULL;
 419	struct btf *btf = NULL;
 420	char func_sig[1024];
 421	unsigned char *buf;
 422	__u32 member_len;
 423	ssize_t n;
 424	int fd;
 425
 426	if (mode == DUMP_JITED) {
 427		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
 428			p_info("no instructions returned");
 429			return -1;
 430		}
 431		buf = u64_to_ptr(info->jited_prog_insns);
 432		member_len = info->jited_prog_len;
 433	} else {	/* DUMP_XLATED */
 434		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 435			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 436			return -1;
 437		}
 438		buf = u64_to_ptr(info->xlated_prog_insns);
 439		member_len = info->xlated_prog_len;
 440	}
 441
 442	if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
 443		p_err("failed to get btf");
 444		return -1;
 445	}
 446
 447	func_info = u64_to_ptr(info->func_info);
 448
 449	if (info->nr_line_info) {
 450		prog_linfo = bpf_prog_linfo__new(info);
 451		if (!prog_linfo)
 452			p_info("error in processing bpf_line_info.  continue without it.");
 453	}
 454
 455	if (filepath) {
 456		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 457		if (fd < 0) {
 458			p_err("can't open file %s: %s", filepath,
 459			      strerror(errno));
 460			return -1;
 461		}
 462
 463		n = write(fd, buf, member_len);
 464		close(fd);
 465		if (n != (ssize_t)member_len) {
 466			p_err("error writing output file: %s",
 467			      n < 0 ? strerror(errno) : "short write");
 468			return -1;
 469		}
 470
 471		if (json_output)
 472			jsonw_null(json_wtr);
 473	} else if (mode == DUMP_JITED) {
 474		const char *name = NULL;
 475
 476		if (info->ifindex) {
 477			name = ifindex_to_bfd_params(info->ifindex,
 478						     info->netns_dev,
 479						     info->netns_ino,
 480						     &disasm_opt);
 481			if (!name)
 482				return -1;
 483		}
 484
 485		if (info->nr_jited_func_lens && info->jited_func_lens) {
 486			struct kernel_sym *sym = NULL;
 487			struct bpf_func_info *record;
 488			char sym_name[SYM_MAX_NAME];
 489			unsigned char *img = buf;
 490			__u64 *ksyms = NULL;
 491			__u32 *lens;
 492			__u32 i;
 493			if (info->nr_jited_ksyms) {
 494				kernel_syms_load(&dd);
 495				ksyms = u64_to_ptr(info->jited_ksyms);
 496			}
 497
 498			if (json_output)
 499				jsonw_start_array(json_wtr);
 500
 501			lens = u64_to_ptr(info->jited_func_lens);
 502			for (i = 0; i < info->nr_jited_func_lens; i++) {
 503				if (ksyms) {
 504					sym = kernel_syms_search(&dd, ksyms[i]);
 505					if (sym)
 506						sprintf(sym_name, "%s", sym->name);
 507					else
 508						sprintf(sym_name, "0x%016llx", ksyms[i]);
 509				} else {
 510					strcpy(sym_name, "unknown");
 511				}
 512
 513				if (func_info) {
 514					record = func_info + i * info->func_info_rec_size;
 515					btf_dumper_type_only(btf, record->type_id,
 516							     func_sig,
 517							     sizeof(func_sig));
 518				}
 519
 520				if (json_output) {
 521					jsonw_start_object(json_wtr);
 522					if (func_info && func_sig[0] != '\0') {
 523						jsonw_name(json_wtr, "proto");
 524						jsonw_string(json_wtr, func_sig);
 525					}
 526					jsonw_name(json_wtr, "name");
 527					jsonw_string(json_wtr, sym_name);
 528					jsonw_name(json_wtr, "insns");
 529				} else {
 530					if (func_info && func_sig[0] != '\0')
 531						printf("%s:\n", func_sig);
 532					printf("%s:\n", sym_name);
 533				}
 534
 535				disasm_print_insn(img, lens[i], opcodes,
 536						  name, disasm_opt, btf,
 537						  prog_linfo, ksyms[i], i,
 538						  linum);
 539
 540				img += lens[i];
 541
 542				if (json_output)
 543					jsonw_end_object(json_wtr);
 544				else
 545					printf("\n");
 546			}
 547
 548			if (json_output)
 549				jsonw_end_array(json_wtr);
 550		} else {
 551			disasm_print_insn(buf, member_len, opcodes, name,
 552					  disasm_opt, btf, NULL, 0, 0, false);
 553		}
 554	} else if (visual) {
 555		if (json_output)
 556			jsonw_null(json_wtr);
 557		else
 558			dump_xlated_cfg(buf, member_len);
 559	} else {
 560		kernel_syms_load(&dd);
 561		dd.nr_jited_ksyms = info->nr_jited_ksyms;
 562		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
 563		dd.btf = btf;
 564		dd.func_info = func_info;
 565		dd.finfo_rec_size = info->func_info_rec_size;
 566		dd.prog_linfo = prog_linfo;
 567
 568		if (json_output)
 569			dump_xlated_json(&dd, buf, member_len, opcodes,
 570					 linum);
 571		else
 572			dump_xlated_plain(&dd, buf, member_len, opcodes,
 573					  linum);
 574		kernel_syms_destroy(&dd);
 575	}
 576
 577	return 0;
 578}
 579
 580static int do_dump(int argc, char **argv)
 581{
 582	struct bpf_prog_info_linear *info_linear;
 583	char *filepath = NULL;
 584	bool opcodes = false;
 585	bool visual = false;
 586	enum dump_mode mode;
 587	bool linum = false;
 588	int *fds = NULL;
 589	int nb_fds, i = 0;
 590	int err = -1;
 591	__u64 arrays;
 592
 593	if (is_prefix(*argv, "jited")) {
 594		if (disasm_init())
 595			return -1;
 596		mode = DUMP_JITED;
 597	} else if (is_prefix(*argv, "xlated")) {
 598		mode = DUMP_XLATED;
 599	} else {
 600		p_err("expected 'xlated' or 'jited', got: %s", *argv);
 601		return -1;
 602	}
 603	NEXT_ARG();
 604
 605	if (argc < 2)
 606		usage();
 607
 608	fds = malloc(sizeof(int));
 609	if (!fds) {
 610		p_err("mem alloc failed");
 611		return -1;
 612	}
 613	nb_fds = prog_parse_fds(&argc, &argv, &fds);
 614	if (nb_fds < 1)
 615		goto exit_free;
 616
 617	if (is_prefix(*argv, "file")) {
 618		NEXT_ARG();
 619		if (!argc) {
 620			p_err("expected file path");
 621			goto exit_close;
 622		}
 623		if (nb_fds > 1) {
 624			p_err("several programs matched");
 625			goto exit_close;
 626		}
 627
 628		filepath = *argv;
 629		NEXT_ARG();
 630	} else if (is_prefix(*argv, "opcodes")) {
 631		opcodes = true;
 632		NEXT_ARG();
 633	} else if (is_prefix(*argv, "visual")) {
 634		if (nb_fds > 1) {
 635			p_err("several programs matched");
 636			goto exit_close;
 637		}
 638
 639		visual = true;
 640		NEXT_ARG();
 641	} else if (is_prefix(*argv, "linum")) {
 642		linum = true;
 643		NEXT_ARG();
 644	}
 645
 646	if (argc) {
 647		usage();
 648		goto exit_close;
 649	}
 650
 651	if (mode == DUMP_JITED)
 652		arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
 653	else
 654		arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
 655
 656	arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
 657	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
 658	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
 659	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
 660	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
 661
 662	if (json_output && nb_fds > 1)
 663		jsonw_start_array(json_wtr);	/* root array */
 664	for (i = 0; i < nb_fds; i++) {
 665		info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
 666		if (IS_ERR_OR_NULL(info_linear)) {
 667			p_err("can't get prog info: %s", strerror(errno));
 668			break;
 669		}
 670
 671		if (json_output && nb_fds > 1) {
 672			jsonw_start_object(json_wtr);	/* prog object */
 673			print_prog_header_json(&info_linear->info);
 674			jsonw_name(json_wtr, "insns");
 675		} else if (nb_fds > 1) {
 676			print_prog_header_plain(&info_linear->info);
 677		}
 678
 679		err = prog_dump(&info_linear->info, mode, filepath, opcodes,
 680				visual, linum);
 681
 682		if (json_output && nb_fds > 1)
 683			jsonw_end_object(json_wtr);	/* prog object */
 684		else if (i != nb_fds - 1 && nb_fds > 1)
 685			printf("\n");
 686
 687		free(info_linear);
 688		if (err)
 689			break;
 690		close(fds[i]);
 691	}
 692	if (json_output && nb_fds > 1)
 693		jsonw_end_array(json_wtr);	/* root array */
 694
 695exit_close:
 696	for (; i < nb_fds; i++)
 697		close(fds[i]);
 698exit_free:
 699	free(fds);
 700	return err;
 701}
 702
 703static int do_pin(int argc, char **argv)
 704{
 705	int err;
 706
 707	err = do_pin_any(argc, argv, prog_parse_fd);
 708	if (!err && json_output)
 709		jsonw_null(json_wtr);
 710	return err;
 711}
 712
 713struct map_replace {
 714	int idx;
 715	int fd;
 716	char *name;
 717};
 718
 719static int map_replace_compar(const void *p1, const void *p2)
 720{
 721	const struct map_replace *a = p1, *b = p2;
 722
 723	return a->idx - b->idx;
 724}
 725
 726static int parse_attach_detach_args(int argc, char **argv, int *progfd,
 727				    enum bpf_attach_type *attach_type,
 728				    int *mapfd)
 729{
 730	if (!REQ_ARGS(3))
 731		return -EINVAL;
 732
 733	*progfd = prog_parse_fd(&argc, &argv);
 734	if (*progfd < 0)
 735		return *progfd;
 736
 737	*attach_type = parse_attach_type(*argv);
 738	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
 739		p_err("invalid attach/detach type");
 740		return -EINVAL;
 741	}
 742
 743	if (*attach_type == BPF_FLOW_DISSECTOR) {
 744		*mapfd = -1;
 745		return 0;
 746	}
 747
 748	NEXT_ARG();
 749	if (!REQ_ARGS(2))
 750		return -EINVAL;
 751
 752	*mapfd = map_parse_fd(&argc, &argv);
 753	if (*mapfd < 0)
 754		return *mapfd;
 755
 756	return 0;
 757}
 758
 759static int do_attach(int argc, char **argv)
 760{
 761	enum bpf_attach_type attach_type;
 762	int err, progfd;
 763	int mapfd;
 764
 765	err = parse_attach_detach_args(argc, argv,
 766				       &progfd, &attach_type, &mapfd);
 767	if (err)
 768		return err;
 769
 770	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
 771	if (err) {
 772		p_err("failed prog attach to map");
 773		return -EINVAL;
 774	}
 775
 776	if (json_output)
 777		jsonw_null(json_wtr);
 778	return 0;
 779}
 780
 781static int do_detach(int argc, char **argv)
 782{
 783	enum bpf_attach_type attach_type;
 784	int err, progfd;
 785	int mapfd;
 786
 787	err = parse_attach_detach_args(argc, argv,
 788				       &progfd, &attach_type, &mapfd);
 789	if (err)
 790		return err;
 791
 792	err = bpf_prog_detach2(progfd, mapfd, attach_type);
 793	if (err) {
 794		p_err("failed prog detach from map");
 795		return -EINVAL;
 796	}
 797
 798	if (json_output)
 799		jsonw_null(json_wtr);
 800	return 0;
 801}
 802
 803static int check_single_stdin(char *file_data_in, char *file_ctx_in)
 804{
 805	if (file_data_in && file_ctx_in &&
 806	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
 807		p_err("cannot use standard input for both data_in and ctx_in");
 808		return -1;
 809	}
 810
 811	return 0;
 812}
 813
 814static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
 815{
 816	size_t block_size = 256;
 817	size_t buf_size = block_size;
 818	size_t nb_read = 0;
 819	void *tmp;
 820	FILE *f;
 821
 822	if (!fname) {
 823		*data_ptr = NULL;
 824		*size = 0;
 825		return 0;
 826	}
 827
 828	if (!strcmp(fname, "-"))
 829		f = stdin;
 830	else
 831		f = fopen(fname, "r");
 832	if (!f) {
 833		p_err("failed to open %s: %s", fname, strerror(errno));
 834		return -1;
 835	}
 836
 837	*data_ptr = malloc(block_size);
 838	if (!*data_ptr) {
 839		p_err("failed to allocate memory for data_in/ctx_in: %s",
 840		      strerror(errno));
 841		goto err_fclose;
 842	}
 843
 844	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
 845		if (feof(f))
 846			break;
 847		if (ferror(f)) {
 848			p_err("failed to read data_in/ctx_in from %s: %s",
 849			      fname, strerror(errno));
 850			goto err_free;
 851		}
 852		if (nb_read > buf_size - block_size) {
 853			if (buf_size == UINT32_MAX) {
 854				p_err("data_in/ctx_in is too long (max: %d)",
 855				      UINT32_MAX);
 856				goto err_free;
 857			}
 858			/* No space for fread()-ing next chunk; realloc() */
 859			buf_size *= 2;
 860			tmp = realloc(*data_ptr, buf_size);
 861			if (!tmp) {
 862				p_err("failed to reallocate data_in/ctx_in: %s",
 863				      strerror(errno));
 864				goto err_free;
 865			}
 866			*data_ptr = tmp;
 867		}
 868	}
 869	if (f != stdin)
 870		fclose(f);
 871
 872	*size = nb_read;
 873	return 0;
 874
 875err_free:
 876	free(*data_ptr);
 877	*data_ptr = NULL;
 878err_fclose:
 879	if (f != stdin)
 880		fclose(f);
 881	return -1;
 882}
 883
 884static void hex_print(void *data, unsigned int size, FILE *f)
 885{
 886	size_t i, j;
 887	char c;
 888
 889	for (i = 0; i < size; i += 16) {
 890		/* Row offset */
 891		fprintf(f, "%07zx\t", i);
 892
 893		/* Hexadecimal values */
 894		for (j = i; j < i + 16 && j < size; j++)
 895			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
 896				j % 2 ? " " : "");
 897		for (; j < i + 16; j++)
 898			fprintf(f, "  %s", j % 2 ? " " : "");
 899
 900		/* ASCII values (if relevant), '.' otherwise */
 901		fprintf(f, "| ");
 902		for (j = i; j < i + 16 && j < size; j++) {
 903			c = *(char *)(data + j);
 904			if (c < ' ' || c > '~')
 905				c = '.';
 906			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
 907		}
 908
 909		fprintf(f, "\n");
 910	}
 911}
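/* Illustrative hex_print() output line for 16 bytes of data (contents are
 * hypothetical, spacing approximate):
 *
 *	0000000	0200 0000 0000 0000 0000 0000 0800 4500 | ........ ......E.
 */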
 912
 913static int
 914print_run_output(void *data, unsigned int size, const char *fname,
 915		 const char *json_key)
 916{
 917	size_t nb_written;
 918	FILE *f;
 919
 920	if (!fname)
 921		return 0;
 922
 923	if (!strcmp(fname, "-")) {
 924		f = stdout;
 925		if (json_output) {
 926			jsonw_name(json_wtr, json_key);
 927			print_data_json(data, size);
 928		} else {
 929			hex_print(data, size, f);
 930		}
 931		return 0;
 932	}
 933
 934	f = fopen(fname, "w");
 935	if (!f) {
 936		p_err("failed to open %s: %s", fname, strerror(errno));
 937		return -1;
 938	}
 939
 940	nb_written = fwrite(data, 1, size, f);
 941	fclose(f);
 942	if (nb_written != size) {
 943		p_err("failed to write output data/ctx: %s", strerror(errno));
 944		return -1;
 945	}
 946
 947	return 0;
 948}
 949
 950static int alloc_run_data(void **data_ptr, unsigned int size_out)
 951{
 952	*data_ptr = calloc(size_out, 1);
 953	if (!*data_ptr) {
 954		p_err("failed to allocate memory for output data/ctx: %s",
 955		      strerror(errno));
 956		return -1;
 957	}
 958
 959	return 0;
 960}
 961
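/* do_run() feeds data/context read from files (or stdin, "-") into the
 * kernel's BPF_PROG_TEST_RUN facility via bpf_prog_test_run_xattr() and
 * reports the return value and (average) duration. Illustrative invocation
 * (the program selector and file name are hypothetical):
 *
 *	# bpftool prog run id 7 data_in pkt.bin repeat 100
 */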
 962static int do_run(int argc, char **argv)
 963{
 964	char *data_fname_in = NULL, *data_fname_out = NULL;
 965	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
 966	struct bpf_prog_test_run_attr test_attr = {0};
 967	const unsigned int default_size = SZ_32K;
 968	void *data_in = NULL, *data_out = NULL;
 969	void *ctx_in = NULL, *ctx_out = NULL;
 970	unsigned int repeat = 1;
 971	int fd, err;
 972
 973	if (!REQ_ARGS(4))
 974		return -1;
 975
 976	fd = prog_parse_fd(&argc, &argv);
 977	if (fd < 0)
 978		return -1;
 979
 980	while (argc) {
 981		if (detect_common_prefix(*argv, "data_in", "data_out",
 982					 "data_size_out", NULL))
 983			return -1;
 984		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
 985					 "ctx_size_out", NULL))
 986			return -1;
 987
 988		if (is_prefix(*argv, "data_in")) {
 989			NEXT_ARG();
 990			if (!REQ_ARGS(1))
 991				return -1;
 992
 993			data_fname_in = GET_ARG();
 994			if (check_single_stdin(data_fname_in, ctx_fname_in))
 995				return -1;
 996		} else if (is_prefix(*argv, "data_out")) {
 997			NEXT_ARG();
 998			if (!REQ_ARGS(1))
 999				return -1;
1000
1001			data_fname_out = GET_ARG();
1002		} else if (is_prefix(*argv, "data_size_out")) {
1003			char *endptr;
1004
1005			NEXT_ARG();
1006			if (!REQ_ARGS(1))
1007				return -1;
1008
1009			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1010			if (*endptr) {
1011				p_err("can't parse %s as output data size",
1012				      *argv);
1013				return -1;
1014			}
1015			NEXT_ARG();
1016		} else if (is_prefix(*argv, "ctx_in")) {
1017			NEXT_ARG();
1018			if (!REQ_ARGS(1))
1019				return -1;
1020
1021			ctx_fname_in = GET_ARG();
1022			if (check_single_stdin(data_fname_in, ctx_fname_in))
1023				return -1;
1024		} else if (is_prefix(*argv, "ctx_out")) {
1025			NEXT_ARG();
1026			if (!REQ_ARGS(1))
1027				return -1;
1028
1029			ctx_fname_out = GET_ARG();
1030		} else if (is_prefix(*argv, "ctx_size_out")) {
1031			char *endptr;
1032
1033			NEXT_ARG();
1034			if (!REQ_ARGS(1))
1035				return -1;
1036
1037			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1038			if (*endptr) {
1039				p_err("can't parse %s as output context size",
1040				      *argv);
1041				return -1;
1042			}
1043			NEXT_ARG();
1044		} else if (is_prefix(*argv, "repeat")) {
1045			char *endptr;
1046
1047			NEXT_ARG();
1048			if (!REQ_ARGS(1))
1049				return -1;
1050
1051			repeat = strtoul(*argv, &endptr, 0);
1052			if (*endptr) {
1053				p_err("can't parse %s as repeat number",
1054				      *argv);
1055				return -1;
1056			}
1057			NEXT_ARG();
1058		} else {
1059			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1060			      *argv);
1061			return -1;
1062		}
1063	}
1064
1065	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1066	if (err)
1067		return -1;
1068
1069	if (data_in) {
1070		if (!test_attr.data_size_out)
1071			test_attr.data_size_out = default_size;
1072		err = alloc_run_data(&data_out, test_attr.data_size_out);
1073		if (err)
1074			goto free_data_in;
1075	}
1076
1077	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1078	if (err)
1079		goto free_data_out;
1080
1081	if (ctx_in) {
1082		if (!test_attr.ctx_size_out)
1083			test_attr.ctx_size_out = default_size;
1084		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1085		if (err)
1086			goto free_ctx_in;
1087	}
1088
1089	test_attr.prog_fd	= fd;
1090	test_attr.repeat	= repeat;
1091	test_attr.data_in	= data_in;
1092	test_attr.data_out	= data_out;
1093	test_attr.ctx_in	= ctx_in;
1094	test_attr.ctx_out	= ctx_out;
1095
1096	err = bpf_prog_test_run_xattr(&test_attr);
1097	if (err) {
1098		p_err("failed to run program: %s", strerror(errno));
1099		goto free_ctx_out;
1100	}
1101
1102	err = 0;
1103
1104	if (json_output)
1105		jsonw_start_object(json_wtr);	/* root */
1106
1107	/* Do not exit on errors occurring when printing output data/context,
1108	 * we still want to print return value and duration for program run.
1109	 */
1110	if (test_attr.data_size_out)
1111		err += print_run_output(test_attr.data_out,
1112					test_attr.data_size_out,
1113					data_fname_out, "data_out");
1114	if (test_attr.ctx_size_out)
1115		err += print_run_output(test_attr.ctx_out,
1116					test_attr.ctx_size_out,
1117					ctx_fname_out, "ctx_out");
1118
1119	if (json_output) {
1120		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1121		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1122		jsonw_end_object(json_wtr);	/* root */
1123	} else {
1124		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1125			test_attr.retval,
1126			repeat > 1 ? " (average)" : "", test_attr.duration);
1127	}
1128
1129free_ctx_out:
1130	free(ctx_out);
1131free_ctx_in:
1132	free(ctx_in);
1133free_data_out:
1134	free(data_out);
1135free_data_in:
1136	free(data_in);
1137
1138	return err;
1139}
1140
1141static int
1142get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1143		      enum bpf_attach_type *expected_attach_type)
1144{
1145	libbpf_print_fn_t print_backup;
1146	int ret;
1147
1148	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1149	if (!ret)
1150		return ret;
1151
1152	/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1153	print_backup = libbpf_set_print(print_all_levels);
1154	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1155	libbpf_set_print(print_backup);
1156
1157	return ret;
1158}
1159
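/* load_with_options() backs both "prog load" (pin the first program only)
 * and "prog loadall" (pin every program under PATH). Illustrative invocation
 * (object and pin paths are hypothetical):
 *
 *	# bpftool prog load sample.o /sys/fs/bpf/sample type xdp
 */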
1160static int load_with_options(int argc, char **argv, bool first_prog_only)
1161{
1162	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1163	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1164		.relaxed_maps = relaxed_maps,
1165	);
1166	struct bpf_object_load_attr load_attr = { 0 };
1167	enum bpf_attach_type expected_attach_type;
1168	struct map_replace *map_replace = NULL;
1169	struct bpf_program *prog = NULL, *pos;
1170	unsigned int old_map_fds = 0;
1171	const char *pinmaps = NULL;
1172	struct bpf_object *obj;
1173	struct bpf_map *map;
1174	const char *pinfile;
1175	unsigned int i, j;
1176	__u32 ifindex = 0;
1177	const char *file;
1178	int idx, err;
1179
1180
1181	if (!REQ_ARGS(2))
1182		return -1;
1183	file = GET_ARG();
1184	pinfile = GET_ARG();
1185
1186	while (argc) {
1187		if (is_prefix(*argv, "type")) {
1188			char *type;
1189
1190			NEXT_ARG();
1191
1192			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1193				p_err("program type already specified");
1194				goto err_free_reuse_maps;
1195			}
1196			if (!REQ_ARGS(1))
1197				goto err_free_reuse_maps;
1198
1199			/* Put a '/' at the end of type to appease libbpf */
1200			type = malloc(strlen(*argv) + 2);
1201			if (!type) {
1202				p_err("mem alloc failed");
1203				goto err_free_reuse_maps;
1204			}
1205			*type = 0;
1206			strcat(type, *argv);
1207			strcat(type, "/");
1208
1209			err = get_prog_type_by_name(type, &common_prog_type,
1210						    &expected_attach_type);
1211			free(type);
1212			if (err < 0)
1213				goto err_free_reuse_maps;
1214
1215			NEXT_ARG();
1216		} else if (is_prefix(*argv, "map")) {
1217			void *new_map_replace;
1218			char *endptr, *name;
1219			int fd;
1220
1221			NEXT_ARG();
1222
1223			if (!REQ_ARGS(4))
1224				goto err_free_reuse_maps;
1225
1226			if (is_prefix(*argv, "idx")) {
1227				NEXT_ARG();
1228
1229				idx = strtoul(*argv, &endptr, 0);
1230				if (*endptr) {
1231					p_err("can't parse %s as IDX", *argv);
1232					goto err_free_reuse_maps;
1233				}
1234				name = NULL;
1235			} else if (is_prefix(*argv, "name")) {
1236				NEXT_ARG();
1237
1238				name = *argv;
1239				idx = -1;
1240			} else {
1241				p_err("expected 'idx' or 'name', got: '%s'?",
1242				      *argv);
1243				goto err_free_reuse_maps;
1244			}
1245			NEXT_ARG();
1246
1247			fd = map_parse_fd(&argc, &argv);
1248			if (fd < 0)
1249				goto err_free_reuse_maps;
1250
1251			new_map_replace = reallocarray(map_replace,
1252						       old_map_fds + 1,
1253						       sizeof(*map_replace));
1254			if (!new_map_replace) {
1255				p_err("mem alloc failed");
1256				goto err_free_reuse_maps;
1257			}
1258			map_replace = new_map_replace;
1259
1260			map_replace[old_map_fds].idx = idx;
1261			map_replace[old_map_fds].name = name;
1262			map_replace[old_map_fds].fd = fd;
1263			old_map_fds++;
1264		} else if (is_prefix(*argv, "dev")) {
1265			NEXT_ARG();
1266
1267			if (ifindex) {
1268				p_err("offload device already specified");
1269				goto err_free_reuse_maps;
1270			}
1271			if (!REQ_ARGS(1))
1272				goto err_free_reuse_maps;
1273
1274			ifindex = if_nametoindex(*argv);
1275			if (!ifindex) {
1276				p_err("unrecognized netdevice '%s': %s",
1277				      *argv, strerror(errno));
1278				goto err_free_reuse_maps;
1279			}
1280			NEXT_ARG();
1281		} else if (is_prefix(*argv, "pinmaps")) {
1282			NEXT_ARG();
1283
1284			if (!REQ_ARGS(1))
1285				goto err_free_reuse_maps;
1286
1287			pinmaps = GET_ARG();
1288		} else {
1289			p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
1290			      *argv);
1291			goto err_free_reuse_maps;
1292		}
1293	}
1294
1295	set_max_rlimit();
1296
1297	obj = bpf_object__open_file(file, &open_opts);
1298	if (IS_ERR_OR_NULL(obj)) {
1299		p_err("failed to open object file");
1300		goto err_free_reuse_maps;
1301	}
1302
1303	bpf_object__for_each_program(pos, obj) {
1304		enum bpf_prog_type prog_type = common_prog_type;
1305
1306		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1307			const char *sec_name = bpf_program__title(pos, false);
1308
1309			err = get_prog_type_by_name(sec_name, &prog_type,
1310						    &expected_attach_type);
1311			if (err < 0)
1312				goto err_close_obj;
1313		}
1314
1315		bpf_program__set_ifindex(pos, ifindex);
1316		bpf_program__set_type(pos, prog_type);
1317		bpf_program__set_expected_attach_type(pos, expected_attach_type);
1318	}
1319
1320	qsort(map_replace, old_map_fds, sizeof(*map_replace),
1321	      map_replace_compar);
1322
1323	/* After the sort maps by name will be first on the list, because they
1324	 * have idx == -1.  Resolve them.
1325	 */
1326	j = 0;
1327	while (j < old_map_fds && map_replace[j].name) {
1328		i = 0;
1329		bpf_object__for_each_map(map, obj) {
1330			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1331				map_replace[j].idx = i;
1332				break;
1333			}
1334			i++;
1335		}
1336		if (map_replace[j].idx == -1) {
1337			p_err("unable to find map '%s'", map_replace[j].name);
1338			goto err_close_obj;
1339		}
1340		j++;
1341	}
1342	/* Resort if any names were resolved */
1343	if (j)
1344		qsort(map_replace, old_map_fds, sizeof(*map_replace),
1345		      map_replace_compar);
1346
1347	/* Set ifindex and name reuse */
1348	j = 0;
1349	idx = 0;
1350	bpf_object__for_each_map(map, obj) {
1351		if (!bpf_map__is_offload_neutral(map))
1352			bpf_map__set_ifindex(map, ifindex);
1353
1354		if (j < old_map_fds && idx == map_replace[j].idx) {
1355			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1356			if (err) {
1357				p_err("unable to set up map reuse: %d", err);
1358				goto err_close_obj;
1359			}
1360
1361			/* Next reuse wants to apply to the same map */
1362			if (j < old_map_fds && map_replace[j].idx == idx) {
1363				p_err("replacement for map idx %d specified more than once",
1364				      idx);
1365				goto err_close_obj;
1366			}
1367		}
1368
1369		idx++;
1370	}
1371	if (j < old_map_fds) {
1372		p_err("map idx '%d' not used", map_replace[j].idx);
1373		goto err_close_obj;
1374	}
1375
1376	load_attr.obj = obj;
1377	if (verifier_logs)
1378		/* log_level1 + log_level2 + stats, but not stable UAPI */
1379		load_attr.log_level = 1 + 2 + 4;
1380
1381	err = bpf_object__load_xattr(&load_attr);
1382	if (err) {
1383		p_err("failed to load object file");
1384		goto err_close_obj;
1385	}
1386
1387	err = mount_bpffs_for_pin(pinfile);
1388	if (err)
1389		goto err_close_obj;
1390
1391	if (first_prog_only) {
1392		prog = bpf_program__next(NULL, obj);
1393		if (!prog) {
1394			p_err("object file doesn't contain any bpf program");
1395			goto err_close_obj;
1396		}
1397
1398		err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1399		if (err) {
1400			p_err("failed to pin program %s",
1401			      bpf_program__title(prog, false));
1402			goto err_close_obj;
1403		}
1404	} else {
1405		err = bpf_object__pin_programs(obj, pinfile);
1406		if (err) {
1407			p_err("failed to pin all programs");
1408			goto err_close_obj;
1409		}
1410	}
1411
1412	if (pinmaps) {
1413		err = bpf_object__pin_maps(obj, pinmaps);
1414		if (err) {
1415			p_err("failed to pin all maps");
1416			goto err_unpin;
1417		}
1418	}
1419
1420	if (json_output)
1421		jsonw_null(json_wtr);
1422
1423	bpf_object__close(obj);
1424	for (i = 0; i < old_map_fds; i++)
1425		close(map_replace[i].fd);
1426	free(map_replace);
1427
1428	return 0;
1429
1430err_unpin:
1431	if (first_prog_only)
1432		unlink(pinfile);
1433	else
1434		bpf_object__unpin_programs(obj, pinfile);
1435err_close_obj:
1436	bpf_object__close(obj);
1437err_free_reuse_maps:
1438	for (i = 0; i < old_map_fds; i++)
1439		close(map_replace[i].fd);
1440	free(map_replace);
1441	return -1;
1442}
1443
1444static int do_load(int argc, char **argv)
1445{
1446	return load_with_options(argc, argv, true);
1447}
1448
1449static int do_loadall(int argc, char **argv)
1450{
1451	return load_with_options(argc, argv, false);
1452}
1453
1454#ifdef BPFTOOL_WITHOUT_SKELETONS
1455
1456static int do_profile(int argc, char **argv)
1457{
1458	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1459	return 0;
1460}
1461
1462#else /* BPFTOOL_WITHOUT_SKELETONS */
1463
1464#include "profiler.skel.h"
1465
1466struct profile_metric {
1467	const char *name;
1468	struct bpf_perf_event_value val;
1469	struct perf_event_attr attr;
1470	bool selected;
1471
1472	/* calculate ratios like instructions per cycle */
1473	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1474	const char *ratio_desc;
1475	const float ratio_mul;
1476} metrics[] = {
1477	{
1478		.name = "cycles",
1479		.attr = {
1480			.type = PERF_TYPE_HARDWARE,
1481			.config = PERF_COUNT_HW_CPU_CYCLES,
1482			.exclude_user = 1,
1483		},
1484	},
1485	{
1486		.name = "instructions",
1487		.attr = {
1488			.type = PERF_TYPE_HARDWARE,
1489			.config = PERF_COUNT_HW_INSTRUCTIONS,
1490			.exclude_user = 1,
1491		},
1492		.ratio_metric = 1,
1493		.ratio_desc = "insns per cycle",
1494		.ratio_mul = 1.0,
1495	},
1496	{
1497		.name = "l1d_loads",
1498		.attr = {
1499			.type = PERF_TYPE_HW_CACHE,
1500			.config =
1501				PERF_COUNT_HW_CACHE_L1D |
1502				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1503				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
1504			.exclude_user = 1,
1505		},
1506	},
1507	{
1508		.name = "llc_misses",
1509		.attr = {
1510			.type = PERF_TYPE_HW_CACHE,
1511			.config =
1512				PERF_COUNT_HW_CACHE_LL |
1513				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1514				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1515			.exclude_user = 1
1516		},
1517		.ratio_metric = 2,
1518		.ratio_desc = "LLC misses per million insns",
1519		.ratio_mul = 1e6,
1520	},
1521};
1522
1523static __u64 profile_total_count;
1524
1525#define MAX_NUM_PROFILE_METRICS 4
1526
1527static int profile_parse_metrics(int argc, char **argv)
1528{
1529	unsigned int metric_cnt;
1530	int selected_cnt = 0;
1531	unsigned int i;
1532
1533	metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
1534
1535	while (argc > 0) {
1536		for (i = 0; i < metric_cnt; i++) {
1537			if (is_prefix(argv[0], metrics[i].name)) {
1538				if (!metrics[i].selected)
1539					selected_cnt++;
1540				metrics[i].selected = true;
1541				break;
1542			}
1543		}
1544		if (i == metric_cnt) {
1545			p_err("unknown metric %s", argv[0]);
1546			return -1;
1547		}
1548		NEXT_ARG();
1549	}
1550	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
1551		p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
1552		      selected_cnt, MAX_NUM_PROFILE_METRICS);
1553		return -1;
1554	}
1555	return selected_cnt;
1556}
1557
1558static void profile_read_values(struct profiler_bpf *obj)
1559{
1560	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
1561	int reading_map_fd, count_map_fd;
1562	__u64 counts[num_cpu];
1563	__u32 key = 0;
1564	int err;
1565
1566	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
1567	count_map_fd = bpf_map__fd(obj->maps.counts);
1568	if (reading_map_fd < 0 || count_map_fd < 0) {
1569		p_err("failed to get fd for map");
1570		return;
1571	}
1572
1573	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
1574	if (err) {
1575		p_err("failed to read count_map: %s", strerror(errno));
1576		return;
1577	}
1578
1579	profile_total_count = 0;
1580	for (cpu = 0; cpu < num_cpu; cpu++)
1581		profile_total_count += counts[cpu];
1582
1583	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1584		struct bpf_perf_event_value values[num_cpu];
1585
1586		if (!metrics[m].selected)
1587			continue;
1588
1589		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
1590		if (err) {
1591			p_err("failed to read reading_map: %s",
1592			      strerror(errno));
1593			return;
1594		}
1595		for (cpu = 0; cpu < num_cpu; cpu++) {
1596			metrics[m].val.counter += values[cpu].counter;
1597			metrics[m].val.enabled += values[cpu].enabled;
1598			metrics[m].val.running += values[cpu].running;
1599		}
1600		key++;
1601	}
1602}
1603
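/* JSON output: an array with one object per selected metric, carrying the
 * total run count plus the raw counter, enabled and running times.
 */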
1604static void profile_print_readings_json(void)
1605{
1606	__u32 m;
1607
1608	jsonw_start_array(json_wtr);
1609	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1610		if (!metrics[m].selected)
1611			continue;
1612		jsonw_start_object(json_wtr);
1613		jsonw_string_field(json_wtr, "metric", metrics[m].name);
1614		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
1615		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
1616		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
1617		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
1618
1619		jsonw_end_object(json_wtr);
1620	}
1621	jsonw_end_array(json_wtr);
1622}
1623
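/* Plain output: one line per selected metric with its raw counter, an
 * optional derived ratio, and, when the perf event was multiplexed
 * (enabled > running), the percentage of time it was actually running.
 * Illustrative layout (numbers are made up):
 *
 *            1885013 run_cnt
 *         5134747081 cycles
 *         4310915457 instructions   #     0.84 insns per cycle
 */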
1624static void profile_print_readings_plain(void)
1625{
1626	__u32 m;
1627
1628	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
1629	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1630		struct bpf_perf_event_value *val = &metrics[m].val;
1631		int r;
1632
1633		if (!metrics[m].selected)
1634			continue;
1635		printf("%18llu %-20s", val->counter, metrics[m].name);
1636
1637		r = metrics[m].ratio_metric - 1;
1638		if (r >= 0 && metrics[r].selected &&
1639		    metrics[r].val.counter > 0) {
1640			printf("# %8.2f %-30s",
1641			       val->counter * metrics[m].ratio_mul /
1642			       metrics[r].val.counter,
1643			       metrics[m].ratio_desc);
1644		} else {
1645			printf("%-41s", "");
1646		}
1647
1648		if (val->enabled > val->running)
1649			printf("(%4.2f%%)",
1650			       val->running * 100.0 / val->enabled);
1651		printf("\n");
1652	}
1653}
1654
1655static void profile_print_readings(void)
1656{
1657	if (json_output)
1658		profile_print_readings_json();
1659	else
1660		profile_print_readings_plain();
1661}
1662
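/* Look up the name of the target program's first function from its BTF
 * func_info. do_profile() passes this name to
 * bpf_program__set_attach_target() so the profiler skeleton's fentry/fexit
 * programs can attach to the target program. The caller must free the
 * returned string.
 */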
1663static char *profile_target_name(int tgt_fd)
1664{
1665	struct bpf_prog_info_linear *info_linear;
1666	struct bpf_func_info *func_info;
1667	const struct btf_type *t;
1668	char *name = NULL;
1669	struct btf *btf = NULL;
1670
1671	info_linear = bpf_program__get_prog_info_linear(
1672		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
1673	if (IS_ERR_OR_NULL(info_linear)) {
1674		p_err("failed to get info_linear for prog FD %d", tgt_fd);
1675		return NULL;
1676	}
1677
1678	if (info_linear->info.btf_id == 0 ||
1679	    btf__get_from_id(info_linear->info.btf_id, &btf)) {
1680		p_err("prog FD %d doesn't have valid btf", tgt_fd);
1681		goto out;
1682	}
1683
1684	func_info = u64_to_ptr(info_linear->info.func_info);
1685	t = btf__type_by_id(btf, func_info[0].type_id);
1686	if (!t) {
1687		p_err("btf %d doesn't have type %d",
1688		      info_linear->info.btf_id, func_info[0].type_id);
1689		goto out;
1690	}
1691	name = strdup(btf__name_by_offset(btf, t->name_off));
1692out:
	btf__free(btf);
1693	free(info_linear);
1694	return name;
1695}
1696
1697static struct profiler_bpf *profile_obj;
1698static int profile_tgt_fd = -1;
1699static char *profile_tgt_name;
1700static int *profile_perf_events;
1701static int profile_perf_event_cnt;
1702
1703static void profile_close_perf_events(struct profiler_bpf *obj)
1704{
1705	int i;
1706
1707	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
1708		close(profile_perf_events[i]);
1709
1710	free(profile_perf_events);
1711	profile_perf_event_cnt = 0;
1712}
1713
1713
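/* Open one perf event per (selected metric, possible CPU) pair, store its
 * fd in the "events" map so the BPF side can read the counter, enable it
 * with PERF_EVENT_IOC_ENABLE, and remember the fd so
 * profile_close_perf_events() can close it later.
 */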
1714static int profile_open_perf_events(struct profiler_bpf *obj)
1715{
1716	unsigned int cpu, m;
1717	int map_fd, pmu_fd;
1718
1719	profile_perf_events = calloc(
1720		obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
1721	if (!profile_perf_events) {
1722		p_err("failed to allocate memory for perf_event array: %s",
1723		      strerror(errno));
1724		return -1;
1725	}
1726	map_fd = bpf_map__fd(obj->maps.events);
1727	if (map_fd < 0) {
1728		p_err("failed to get fd for events map");
1729		return -1;
1730	}
1731
1732	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1733		if (!metrics[m].selected)
1734			continue;
1735		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
1736			pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
1737					 -1/*pid*/, cpu, -1/*group_fd*/, 0);
1738			if (pmu_fd < 0 ||
1739			    bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
1740						&pmu_fd, BPF_ANY) ||
1741			    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
1742				p_err("failed to create event %s on cpu %d",
1743				      metrics[m].name, cpu);
1744				return -1;
1745			}
1746			profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
1747		}
1748	}
1749	return 0;
1750}
1751
1752static void profile_print_and_cleanup(void)
1753{
1754	profile_close_perf_events(profile_obj);
1755	profile_read_values(profile_obj);
1756	profile_print_readings();
1757	profiler_bpf__destroy(profile_obj);
1758
1759	close(profile_tgt_fd);
1760	free(profile_tgt_name);
1761}
1762
1763static void int_exit(int signo)
1764{
1765	profile_print_and_cleanup();
1766	exit(0);
1767}
1768
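/* Entry point for "bpftool prog profile". Illustrative invocation (the
 * program id is arbitrary):
 *
 *   # bpftool prog profile id 42 duration 10 cycles instructions
 *
 * profiles program 42 for 10 seconds, counting cycles and instructions.
 */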
1769static int do_profile(int argc, char **argv)
1770{
1771	int num_metric, num_cpu, err = -1;
1772	struct bpf_program *prog;
1773	unsigned long duration;
1774	char *endptr;
1775
1776	/* we need at least two args for the prog and one for a metric */
1777	if (!REQ_ARGS(3))
1778		return -EINVAL;
1779
1780	/* parse target fd */
1781	profile_tgt_fd = prog_parse_fd(&argc, &argv);
1782	if (profile_tgt_fd < 0) {
1783		p_err("failed to parse fd");
1784		return -1;
1785	}
1786
1787	/* parse optional profiling duration */
1788	if (argc > 2 && is_prefix(argv[0], "duration")) {
1789		NEXT_ARG();
1790		duration = strtoul(*argv, &endptr, 0);
1791		if (*endptr)
1792			usage();
1793		NEXT_ARG();
1794	} else {
1795		duration = UINT_MAX;
1796	}
1797
1798	num_metric = profile_parse_metrics(argc, argv);
1799	if (num_metric <= 0)
1800		goto out;
1801
1802	num_cpu = libbpf_num_possible_cpus();
1803	if (num_cpu <= 0) {
1804		p_err("failed to identify number of CPUs");
1805		goto out;
1806	}
1807
1808	profile_obj = profiler_bpf__open();
1809	if (!profile_obj) {
1810		p_err("failed to open and/or load BPF object");
1811		goto out;
1812	}
1813
1814	profile_obj->rodata->num_cpu = num_cpu;
1815	profile_obj->rodata->num_metric = num_metric;
1816
1817	/* adjust map sizes */
1818	bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
1819	bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
1820	bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
1821	bpf_map__resize(profile_obj->maps.counts, 1);
1822
1823	/* get the target program's function name */
1824	profile_tgt_name = profile_target_name(profile_tgt_fd);
1825	if (!profile_tgt_name)
1826		goto out;
1827
1828	bpf_object__for_each_program(prog, profile_obj->obj) {
1829		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
1830						     profile_tgt_name);
1831		if (err) {
1832			p_err("failed to set attach target");
1833			goto out;
1834		}
1835	}
1836
1837	set_max_rlimit();
1838	err = profiler_bpf__load(profile_obj);
1839	if (err) {
1840		p_err("failed to load profile_obj");
1841		goto out;
1842	}
1843
1844	err = profile_open_perf_events(profile_obj);
1845	if (err)
1846		goto out;
1847
1848	err = profiler_bpf__attach(profile_obj);
1849	if (err) {
1850		p_err("failed to attach profile_obj");
1851		goto out;
1852	}
1853	signal(SIGINT, int_exit);
1854
1855	sleep(duration);
1856	profile_print_and_cleanup();
1857	return 0;
1858
1859out:
1860	profile_close_perf_events(profile_obj);
1861	if (profile_obj)
1862		profiler_bpf__destroy(profile_obj);
1863	close(profile_tgt_fd);
1864	free(profile_tgt_name);
1865	return err;
1866}
1867
1868#endif /* BPFTOOL_WITHOUT_SKELETONS */
1869
1870static int do_help(int argc, char **argv)
1871{
1872	if (json_output) {
1873		jsonw_null(json_wtr);
1874		return 0;
1875	}
1876
1877	fprintf(stderr,
1878		"Usage: %1$s %2$s { show | list } [PROG]\n"
1879		"       %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
1880		"       %1$s %2$s dump jited  PROG [{ file FILE | opcodes | linum }]\n"
1881		"       %1$s %2$s pin   PROG FILE\n"
1882		"       %1$s %2$s { load | loadall } OBJ  PATH \\\n"
1883		"                         [type TYPE] [dev NAME] \\\n"
1884		"                         [map { idx IDX | name NAME } MAP]\\\n"
1885		"                         [pinmaps MAP_DIR]\n"
1886		"       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
1887		"       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
1888		"       %1$s %2$s run PROG \\\n"
1889		"                         data_in FILE \\\n"
1890		"                         [data_out FILE [data_size_out L]] \\\n"
1891		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
1892		"                         [repeat N]\n"
1893		"       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
1894		"       %1$s %2$s tracelog\n"
1895		"       %1$s %2$s help\n"
1896		"\n"
1897		"       " HELP_SPEC_MAP "\n"
1898		"       " HELP_SPEC_PROGRAM "\n"
1899		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
1900		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
1901		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
1902		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
1903		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
1904		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
1905		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
1906		"                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
1907		"                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
1908		"                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
1909		"                 cgroup/getsockopt | cgroup/setsockopt |\n"
1910		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
1911		"       ATTACH_TYPE := { msg_verdict | skb_verdict | stream_verdict |\n"
1912		"                        stream_parser | flow_dissector }\n"
1913		"       METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
1914		"       " HELP_SPEC_OPTIONS "\n"
1915		"",
1916		bin_name, argv[-2]);
1917
1918	return 0;
1919}
1920
1921static const struct cmd cmds[] = {
1922	{ "show",	do_show },
1923	{ "list",	do_show },
1924	{ "help",	do_help },
1925	{ "dump",	do_dump },
1926	{ "pin",	do_pin },
1927	{ "load",	do_load },
1928	{ "loadall",	do_loadall },
1929	{ "attach",	do_attach },
1930	{ "detach",	do_detach },
1931	{ "tracelog",	do_tracelog },
1932	{ "run",	do_run },
1933	{ "profile",	do_profile },
1934	{ 0 }
1935};
1936
1937int do_prog(int argc, char **argv)
1938{
1939	return cmd_select(cmds, argc, argv, do_help);
1940}