tools/bpf/bpftool/common.c (Linux v6.13.7)
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3
   4#ifndef _GNU_SOURCE
   5#define _GNU_SOURCE
   6#endif
   7#include <ctype.h>
   8#include <errno.h>
   9#include <fcntl.h>
  10#include <ftw.h>
  11#include <libgen.h>
  12#include <mntent.h>
  13#include <stdbool.h>
  14#include <stdio.h>
  15#include <stdlib.h>
  16#include <string.h>
  17#include <unistd.h>
  18#include <net/if.h>
  19#include <sys/mount.h>
  20#include <sys/resource.h>
  21#include <sys/stat.h>
  22#include <sys/vfs.h>
  23
  24#include <linux/filter.h>
  25#include <linux/limits.h>
  26#include <linux/magic.h>
  27#include <linux/unistd.h>
  28
  29#include <bpf/bpf.h>
  30#include <bpf/hashmap.h>
  31#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
  32#include <bpf/btf.h>
  33
  34#include "main.h"
  35
  36#ifndef BPF_FS_MAGIC
  37#define BPF_FS_MAGIC		0xcafe4a11
  38#endif
  39
  40void p_err(const char *fmt, ...)
  41{
  42	va_list ap;
  43
  44	va_start(ap, fmt);
  45	if (json_output) {
  46		jsonw_start_object(json_wtr);
  47		jsonw_name(json_wtr, "error");
  48		jsonw_vprintf_enquote(json_wtr, fmt, ap);
  49		jsonw_end_object(json_wtr);
  50	} else {
  51		fprintf(stderr, "Error: ");
  52		vfprintf(stderr, fmt, ap);
  53		fprintf(stderr, "\n");
  54	}
  55	va_end(ap);
  56}
  57
  58void p_info(const char *fmt, ...)
  59{
  60	va_list ap;
  61
  62	if (json_output)
  63		return;
  64
  65	va_start(ap, fmt);
  66	vfprintf(stderr, fmt, ap);
  67	fprintf(stderr, "\n");
  68	va_end(ap);
  69}
  70
  71static bool is_bpffs(const char *path)
  72{
  73	struct statfs st_fs;
  74
  75	if (statfs(path, &st_fs) < 0)
  76		return false;
  77
  78	return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
  79}
  80
  81/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
  82 * memcg-based memory accounting for BPF maps and programs. This was done in
  83 * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
  84 * accounting'"), in Linux 5.11.
  85 *
  86 * Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
  87 * so by checking for the availability of a given BPF helper and this has
  88 * failed on some kernels with backports in the past, see commit 6b4384ff1088
  89 * ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
  90 * Instead, we can probe by lowering the process-based rlimit to 0, trying to
  91 * load a BPF object, and resetting the rlimit. If the load succeeds then
  92 * memcg-based accounting is supported.
  93 *
  94 * This would be too dangerous to do in the library, because multithreaded
  95 * applications might attempt to load items while the rlimit is at 0. Given
  96 * that bpftool is single-threaded, this is fine to do here.
  97 */
  98static bool known_to_need_rlimit(void)
  99{
 100	struct rlimit rlim_init, rlim_cur_zero = {};
 101	struct bpf_insn insns[] = {
 102		BPF_MOV64_IMM(BPF_REG_0, 0),
 103		BPF_EXIT_INSN(),
 104	};
 105	size_t insn_cnt = ARRAY_SIZE(insns);
 106	union bpf_attr attr;
 107	int prog_fd, err;
 108
 109	memset(&attr, 0, sizeof(attr));
 110	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 111	attr.insns = ptr_to_u64(insns);
 112	attr.insn_cnt = insn_cnt;
 113	attr.license = ptr_to_u64("GPL");
 114
 115	if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
 116		return false;
 117
 118	/* Drop the soft limit to zero. We maintain the hard limit to its
 119	 * current value, because lowering it would be a permanent operation
 120	 * for unprivileged users.
 121	 */
 122	rlim_cur_zero.rlim_max = rlim_init.rlim_max;
 123	if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
 124		return false;
 125
 126	/* Do not use bpf_prog_load() from libbpf here, because it calls
 127	 * bump_rlimit_memlock(), interfering with the current probe.
 128	 */
 129	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 130	err = errno;
 131
 132	/* reset soft rlimit to its initial value */
 133	setrlimit(RLIMIT_MEMLOCK, &rlim_init);
 134
 135	if (prog_fd < 0)
 136		return err == EPERM;
 137
 138	close(prog_fd);
 139	return false;
 140}
 141
 142void set_max_rlimit(void)
 143{
 144	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
 145
 146	if (known_to_need_rlimit())
 147		setrlimit(RLIMIT_MEMLOCK, &rinf);
 148}
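
/*
 * [Editor's sketch, not part of the upstream file] How the probe above is
 * meant to be used: a command that is about to create maps or load programs
 * calls set_max_rlimit() first, so that pre-5.11 kernels (which charge BPF
 * memory against RLIMIT_MEMLOCK) do not fail with EPERM. Relies on this
 * file's includes; the map parameters are arbitrary.
 */
static int example_create_map(void)
{
	set_max_rlimit();	/* no-op on memcg-accounting kernels */

	return bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_map",
			      sizeof(__u32), sizeof(__u64), 1, NULL);
}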
 149
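/*
 * [Editor's note] mnt_fs() below first marks the target as a private mount
 * (MS_PRIVATE | MS_REC) so the new mount does not propagate to other mount
 * namespaces. If that fails with EINVAL, the usual reason is that the target
 * is not a mount point yet, so it is bind-mounted onto itself once and the
 * loop retries. Only then is the requested file system mounted with
 * mode=0700.
 */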
 150static int
 151mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
 152{
 153	bool bind_done = false;
 154
 155	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
 156		if (errno != EINVAL || bind_done) {
 157			snprintf(buff, bufflen,
 158				 "mount --make-private %s failed: %s",
 159				 target, strerror(errno));
 160			return -1;
 161		}
 162
 163		if (mount(target, target, "none", MS_BIND, NULL)) {
 164			snprintf(buff, bufflen,
 165				 "mount --bind %s %s failed: %s",
 166				 target, target, strerror(errno));
 167			return -1;
 168		}
 169
 170		bind_done = true;
 171	}
 172
 173	if (mount(type, target, type, 0, "mode=0700")) {
 174		snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
 175			 type, type, target, strerror(errno));
 176		return -1;
 177	}
 178
 179	return 0;
 180}
 181
 182int mount_tracefs(const char *target)
 183{
 184	char err_str[ERR_MAX_LEN];
 185	int err;
 186
 187	err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
 188	if (err) {
 189		err_str[ERR_MAX_LEN - 1] = '\0';
 190		p_err("can't mount tracefs: %s", err_str);
 191	}
 192
 193	return err;
 194}
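
/*
 * [Editor's sketch, not part of the upstream file] A caller that needs the
 * trace pipe could mount tracefs on demand like this (assuming it is not
 * mounted yet); the mount point is illustrative and error handling minimal.
 */
static int example_open_trace_pipe(void)
{
	if (mount_tracefs("/sys/kernel/tracing"))
		return -1;	/* error already reported by mount_tracefs() */

	return open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
}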
 195
 196int open_obj_pinned(const char *path, bool quiet)
 197{
 198	char *pname;
 199	int fd = -1;
 200
 201	pname = strdup(path);
 202	if (!pname) {
 203		if (!quiet)
 204			p_err("mem alloc failed");
 205		goto out_ret;
 206	}
 207
 208	fd = bpf_obj_get(pname);
 209	if (fd < 0) {
 210		if (!quiet)
 211			p_err("bpf obj get (%s): %s", pname,
 212			      errno == EACCES && !is_bpffs(dirname(pname)) ?
 213			    "directory not in bpf file system (bpffs)" :
 214			    strerror(errno));
 215		goto out_free;
 216	}
 217
 218out_free:
 219	free(pname);
 220out_ret:
 221	return fd;
 222}
 223
 224int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
 225{
 226	enum bpf_obj_type type;
 227	int fd;
 228
 229	fd = open_obj_pinned(path, false);
 230	if (fd < 0)
 231		return -1;
 232
 233	type = get_fd_type(fd);
 234	if (type < 0) {
 235		close(fd);
 236		return type;
 237	}
 238	if (type != exp_type) {
 239		p_err("incorrect object type: %s", get_fd_type_name(type));
 240		close(fd);
 241		return -1;
 242	}
 243
 244	return fd;
 245}
 246
 247int create_and_mount_bpffs_dir(const char *dir_name)
 248{
 249	char err_str[ERR_MAX_LEN];
 250	bool dir_exists;
 251	int err = 0;
 252
 253	if (is_bpffs(dir_name))
 254		return err;
 255
 256	dir_exists = access(dir_name, F_OK) == 0;
 257
 258	if (!dir_exists) {
 259		char *temp_name;
 260		char *parent_name;
 261
 262		temp_name = strdup(dir_name);
 263		if (!temp_name) {
 264			p_err("mem alloc failed");
 265			return -1;
 266		}
 267
 268		parent_name = dirname(temp_name);
 269
 270		if (is_bpffs(parent_name)) {
 271			/* nothing to do if already mounted */
 272			free(temp_name);
 273			return err;
 274		}
 275
 276		if (access(parent_name, F_OK) == -1) {
 277			p_err("can't create dir '%s' to pin BPF object: parent dir '%s' doesn't exist",
 278			      dir_name, parent_name);
 279			free(temp_name);
 280			return -1;
 281		}
 282
 283		free(temp_name);
 284	}
 285
 286	if (block_mount) {
 287		p_err("no BPF file system found, not mounting it due to --nomount option");
 288		return -1;
 289	}
 290
 291	if (!dir_exists) {
 292		err = mkdir(dir_name, S_IRWXU);
 293		if (err) {
 294			p_err("failed to create dir '%s': %s", dir_name, strerror(errno));
 295			return err;
 296		}
 297	}
 298
 299	err = mnt_fs(dir_name, "bpf", err_str, ERR_MAX_LEN);
 300	if (err) {
 301		err_str[ERR_MAX_LEN - 1] = '\0';
 302		p_err("can't mount BPF file system on given dir '%s': %s",
 303		      dir_name, err_str);
 304
 305		if (!dir_exists)
 306			rmdir(dir_name);
 307	}
 308
 309	return err;
 310}
 311
 312int mount_bpffs_for_file(const char *file_name)
 313{
 314	char err_str[ERR_MAX_LEN];
 315	char *temp_name;
 316	char *dir;
 317	int err = 0;
 318
 319	if (access(file_name, F_OK) != -1) {
 320		p_err("can't pin BPF object: path '%s' already exists", file_name);
 321		return -1;
 322	}
 323
 324	temp_name = strdup(file_name);
 325	if (!temp_name) {
 326		p_err("mem alloc failed");
 327		return -1;
 328	}
 329
 330	dir = dirname(temp_name);
 331
 332	if (is_bpffs(dir))
 333		/* nothing to do if already mounted */
 334		goto out_free;
 335
 336	if (access(dir, F_OK) == -1) {
 337		p_err("can't pin BPF object: dir '%s' doesn't exist", dir);
 338		err = -1;
 339		goto out_free;
 340	}
 341
 342	if (block_mount) {
 343		p_err("no BPF file system found, not mounting it due to --nomount option");
 344		err = -1;
 345		goto out_free;
 346	}
 347
 348	err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
 349	if (err) {
 350		err_str[ERR_MAX_LEN - 1] = '\0';
 351		p_err("can't mount BPF file system to pin the object '%s': %s",
 352		      file_name, err_str);
 353	}
 354
 355out_free:
 356	free(temp_name);
 357	return err;
 358}
 359
 360int do_pin_fd(int fd, const char *name)
 361{
 362	int err;
 363
 364	err = mount_bpffs_for_file(name);
 365	if (err)
 366		return err;
 367
 368	err = bpf_obj_pin(fd, name);
 369	if (err)
 370		p_err("can't pin the object (%s): %s", name, strerror(errno));
 371
 372	return err;
 373}
 374
 375int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
 376{
 377	int err;
 378	int fd;
 379
 380	if (!REQ_ARGS(3))
 381		return -EINVAL;
 382
 383	fd = get_fd(&argc, &argv);
 384	if (fd < 0)
 385		return fd;
 386
 387	err = do_pin_fd(fd, *argv);
 388
 389	close(fd);
 390	return err;
 391}
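
/*
 * [Editor's sketch, not part of the upstream file] do_pin_any() is the
 * generic back end for "pin" subcommands: the caller passes the parser for
 * its own object type, which consumes the "id|tag|name|pinned ..." handle
 * from argv and returns an fd; the remaining argument is the pin path.
 * Roughly:
 */
static int example_do_prog_pin(int argc, char **argv)
{
	return do_pin_any(argc, argv, prog_parse_fd);
}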
 392
 393const char *get_fd_type_name(enum bpf_obj_type type)
 394{
 395	static const char * const names[] = {
 396		[BPF_OBJ_UNKNOWN]	= "unknown",
 397		[BPF_OBJ_PROG]		= "prog",
 398		[BPF_OBJ_MAP]		= "map",
 399		[BPF_OBJ_LINK]		= "link",
 400	};
 401
 402	if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
 403		return names[BPF_OBJ_UNKNOWN];
 404
 405	return names[type];
 406}
 407
 408void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
 409			char *name_buff, size_t buff_len)
 410{
 411	const char *prog_name = prog_info->name;
 412	const struct btf_type *func_type;
 413	struct bpf_func_info finfo = {};
 414	struct bpf_prog_info info = {};
 415	__u32 info_len = sizeof(info);
 416	struct btf *prog_btf = NULL;
 417
 418	if (buff_len <= BPF_OBJ_NAME_LEN ||
 419	    strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
 420		goto copy_name;
 421
 422	if (!prog_info->btf_id || prog_info->nr_func_info == 0)
 423		goto copy_name;
 424
 425	info.nr_func_info = 1;
 426	info.func_info_rec_size = prog_info->func_info_rec_size;
 427	if (info.func_info_rec_size > sizeof(finfo))
 428		info.func_info_rec_size = sizeof(finfo);
 429	info.func_info = ptr_to_u64(&finfo);
 430
 431	if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
 432		goto copy_name;
 433
 434	prog_btf = btf__load_from_kernel_by_id(info.btf_id);
 435	if (!prog_btf)
 436		goto copy_name;
 437
 438	func_type = btf__type_by_id(prog_btf, finfo.type_id);
 439	if (!func_type || !btf_is_func(func_type))
 440		goto copy_name;
 441
 442	prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
 443
 444copy_name:
 445	snprintf(name_buff, buff_len, "%s", prog_name);
 446
 447	if (prog_btf)
 448		btf__free(prog_btf);
 449}
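
/*
 * [Editor's sketch, not part of the upstream file] bpf_prog_info.name is
 * truncated to BPF_OBJ_NAME_LEN - 1 (15) characters; get_prog_full_name()
 * recovers the full name from the program's BTF FUNC entry when one exists.
 * Typical use:
 */
static void example_print_full_name(int prog_fd)
{
	char name[MAX_PROG_FULL_NAME];
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);

	if (bpf_prog_get_info_by_fd(prog_fd, &info, &len))
		return;

	get_prog_full_name(&info, prog_fd, name, sizeof(name));
	printf("%s\n", name);	/* falls back to info.name if BTF is missing */
}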
 450
 451int get_fd_type(int fd)
 452{
 453	char path[PATH_MAX];
 454	char buf[512];
 455	ssize_t n;
 456
 457	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 458
 459	n = readlink(path, buf, sizeof(buf));
 460	if (n < 0) {
 461		p_err("can't read link type: %s", strerror(errno));
 462		return -1;
 463	}
 464	if (n == sizeof(path)) {
 465		p_err("can't read link type: path too long!");
 466		return -1;
 467	}
 468
 469	if (strstr(buf, "bpf-map"))
 470		return BPF_OBJ_MAP;
 471	else if (strstr(buf, "bpf-prog"))
 472		return BPF_OBJ_PROG;
 473	else if (strstr(buf, "bpf-link"))
 474		return BPF_OBJ_LINK;
 475
 476	return BPF_OBJ_UNKNOWN;
 477}
 478
 479char *get_fdinfo(int fd, const char *key)
 480{
 481	char path[PATH_MAX];
 482	char *line = NULL;
 483	size_t line_n = 0;
 484	ssize_t n;
 485	FILE *fdi;
 486
 487	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
 488
 489	fdi = fopen(path, "r");
 490	if (!fdi)
 491		return NULL;
 492
 493	while ((n = getline(&line, &line_n, fdi)) > 0) {
 494		char *value;
 495		int len;
 496
 497		if (!strstr(line, key))
 498			continue;
 499
 500		fclose(fdi);
 501
 502		value = strchr(line, '\t');
 503		if (!value || !value[1]) {
 504			free(line);
 505			return NULL;
 506		}
 507		value++;
 508
 509		len = strlen(value);
 510		memmove(line, value, len);
 511		line[len - 1] = '\0';
 512
 513		return line;
 514	}
 515
 516	free(line);
 517	fclose(fdi);
 518	return NULL;
 519}
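
/*
 * [Editor's sketch, not part of the upstream file] get_fdinfo() returns a
 * heap-allocated copy of the value following the given key in
 * /proc/self/fdinfo/<fd>, or NULL; the caller must free it. BPF map and
 * program fds expose a "memlock" line, for example:
 */
static void example_print_memlock(int fd)
{
	char *memlock = get_fdinfo(fd, "memlock");

	if (memlock)
		printf("memlock %s bytes\n", memlock);
	free(memlock);	/* free(NULL) is a no-op */
}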
 520
 521void print_data_json(uint8_t *data, size_t len)
 522{
 523	unsigned int i;
 524
 525	jsonw_start_array(json_wtr);
 526	for (i = 0; i < len; i++)
 527		jsonw_printf(json_wtr, "%d", data[i]);
 528	jsonw_end_array(json_wtr);
 529}
 530
 531void print_hex_data_json(uint8_t *data, size_t len)
 532{
 533	unsigned int i;
 534
 535	jsonw_start_array(json_wtr);
 536	for (i = 0; i < len; i++)
 537		jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
 538	jsonw_end_array(json_wtr);
 539}
 540
 541/* extra params for nftw cb */
 542static struct hashmap *build_fn_table;
 543static enum bpf_obj_type build_fn_type;
 544
 545static int do_build_table_cb(const char *fpath, const struct stat *sb,
 546			     int typeflag, struct FTW *ftwbuf)
 547{
 548	struct bpf_prog_info pinned_info;
 549	__u32 len = sizeof(pinned_info);
 550	enum bpf_obj_type objtype;
 551	int fd, err = 0;
 552	char *path;
 553
 554	if (typeflag != FTW_F)
 555		goto out_ret;
 556
 557	fd = open_obj_pinned(fpath, true);
 558	if (fd < 0)
 559		goto out_ret;
 560
 561	objtype = get_fd_type(fd);
 562	if (objtype != build_fn_type)
 563		goto out_close;
 564
 565	memset(&pinned_info, 0, sizeof(pinned_info));
 566	if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
 567		goto out_close;
 568
 569	path = strdup(fpath);
 570	if (!path) {
 571		err = -1;
 572		goto out_close;
 573	}
 574
 575	err = hashmap__append(build_fn_table, pinned_info.id, path);
 576	if (err) {
 577		p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
 578		      pinned_info.id, path, strerror(errno));
 579		free(path);
 580		goto out_close;
 581	}
 582
 583out_close:
 584	close(fd);
 585out_ret:
 586	return err;
 587}
 588
 589int build_pinned_obj_table(struct hashmap *tab,
 590			   enum bpf_obj_type type)
 591{
 592	struct mntent *mntent = NULL;
 593	FILE *mntfile = NULL;
 594	int flags = FTW_PHYS;
 595	int nopenfd = 16;
 596	int err = 0;
 597
 598	mntfile = setmntent("/proc/mounts", "r");
 599	if (!mntfile)
 600		return -1;
 601
 602	build_fn_table = tab;
 603	build_fn_type = type;
 604
 605	while ((mntent = getmntent(mntfile))) {
 606		char *path = mntent->mnt_dir;
 607
 608		if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
 609			continue;
 610		err = nftw(path, do_build_table_cb, nopenfd, flags);
 611		if (err)
 612			break;
 613	}
 614	fclose(mntfile);
 615	return err;
 616}
 617
 618void delete_pinned_obj_table(struct hashmap *map)
 619{
 620	struct hashmap_entry *entry;
 621	size_t bkt;
 622
 623	if (!map)
 624		return;
 625
 626	hashmap__for_each_entry(map, entry, bkt)
 627		free(entry->pvalue);
 628
 629	hashmap__free(map);
 630}
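
/*
 * [Editor's sketch, not part of the upstream file] The table maps object IDs
 * to pinned paths; it is created with the identity hash helpers defined
 * further down in this file, filled by walking every bpffs mount, and then
 * queried per ID. Error handling is trimmed (hashmap__new() returns an
 * ERR_PTR() on failure).
 */
static void example_show_pinned_paths(__u32 prog_id)
{
	struct hashmap_entry *entry;
	struct hashmap *table;

	table = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);

	if (build_pinned_obj_table(table, BPF_OBJ_PROG))
		p_info("failed to walk bpffs mounts");

	hashmap__for_each_key_entry(table, entry, prog_id)
		printf("\tpinned %s\n", (const char *)entry->pvalue);

	delete_pinned_obj_table(table);
}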
 631
 632unsigned int get_page_size(void)
 633{
 634	static int result;
 635
 636	if (!result)
 637		result = getpagesize();
 638	return result;
 639}
 640
 641unsigned int get_possible_cpus(void)
 642{
 643	int cpus = libbpf_num_possible_cpus();
 644
 645	if (cpus < 0) {
 646		p_err("Can't get # of possible cpus: %s", strerror(-cpus));
 647		exit(-1);
 648	}
 649	return cpus;
 650}
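
/*
 * [Editor's sketch, not part of the upstream file] Per-CPU map lookups
 * return one 8-byte-aligned value slot per possible CPU, so value buffers
 * are sized with get_possible_cpus():
 */
static void *example_alloc_percpu_value(__u32 value_size)
{
	size_t step = (value_size + 7) & ~(size_t)7;	/* round up to 8 bytes */

	return calloc(get_possible_cpus(), step);
}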
 651
 652static char *
 653ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
 654{
 655	struct stat st;
 656	int err;
 657
 658	err = stat("/proc/self/ns/net", &st);
 659	if (err) {
 660		p_err("Can't stat /proc/self: %s", strerror(errno));
 661		return NULL;
 662	}
 663
 664	if (st.st_dev != ns_dev || st.st_ino != ns_ino)
 665		return NULL;
 666
 667	return if_indextoname(ifindex, buf);
 668}
 669
 670static int read_sysfs_hex_int(char *path)
 671{
 672	char vendor_id_buf[8];
 673	int len;
 674	int fd;
 675
 676	fd = open(path, O_RDONLY);
 677	if (fd < 0) {
 678		p_err("Can't open %s: %s", path, strerror(errno));
 679		return -1;
 680	}
 681
 682	len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
 683	close(fd);
 684	if (len < 0) {
 685		p_err("Can't read %s: %s", path, strerror(errno));
 686		return -1;
 687	}
 688	if (len >= (int)sizeof(vendor_id_buf)) {
 689		p_err("Value in %s too long", path);
 690		return -1;
 691	}
 692
 693	vendor_id_buf[len] = 0;
 694
 695	return strtol(vendor_id_buf, NULL, 0);
 696}
 697
 698static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
 699{
 700	char full_path[64];
 701
 702	snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
 703		 devname, entry_name);
 704
 705	return read_sysfs_hex_int(full_path);
 706}
 707
 708const char *
 709ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
 710{
 711	__maybe_unused int device_id;
 712	char devname[IF_NAMESIZE];
 713	int vendor_id;
 714
 715	if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
 716		p_err("Can't get net device name for ifindex %d: %s", ifindex,
 717		      strerror(errno));
 718		return NULL;
 719	}
 720
 721	vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
 722	if (vendor_id < 0) {
 723		p_err("Can't get device vendor id for %s", devname);
 724		return NULL;
 725	}
 726
 727	switch (vendor_id) {
 728#ifdef HAVE_LIBBFD_SUPPORT
 729	case 0x19ee:
 730		device_id = read_sysfs_netdev_hex_int(devname, "device");
 731		if (device_id != 0x4000 &&
 732		    device_id != 0x6000 &&
 733		    device_id != 0x6003)
 734			p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
 735		*opt = "ctx4";
 736		return "NFP-6xxx";
 737#endif /* HAVE_LIBBFD_SUPPORT */
 738	/* No NFP support in LLVM, we have no valid triple to return. */
 739	default:
 740		p_err("Can't get arch name for device vendor id 0x%04x",
 741		      vendor_id);
 742		return NULL;
 743	}
 744}
 745
 746void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
 747{
 748	char name[IF_NAMESIZE];
 749
 750	if (!ifindex)
 751		return;
 752
 753	printf("  offloaded_to ");
 754	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
 755		printf("%s", name);
 756	else
 757		printf("ifindex %u ns_dev %llu ns_ino %llu",
 758		       ifindex, ns_dev, ns_inode);
 759}
 760
 761void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
 762{
 763	char name[IF_NAMESIZE];
 764
 765	if (!ifindex)
 766		return;
 767
 768	jsonw_name(json_wtr, "dev");
 769	jsonw_start_object(json_wtr);
 770	jsonw_uint_field(json_wtr, "ifindex", ifindex);
 771	jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
 772	jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
 773	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
 774		jsonw_string_field(json_wtr, "ifname", name);
 775	jsonw_end_object(json_wtr);
 776}
 777
 778int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
 779{
 780	char *endptr;
 781
 782	NEXT_ARGP();
 783
 784	if (*val) {
 785		p_err("%s already specified", what);
 786		return -1;
 787	}
 788
 789	*val = strtoul(**argv, &endptr, 0);
 790	if (*endptr) {
 791		p_err("can't parse %s as %s", **argv, what);
 792		return -1;
 793	}
 794	NEXT_ARGP();
 795
 796	return 0;
 797}
 798
 799int __printf(2, 0)
 800print_all_levels(__maybe_unused enum libbpf_print_level level,
 801		 const char *format, va_list args)
 802{
 803	return vfprintf(stderr, format, args);
 804}
 805
 806static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
 807{
 808	char prog_name[MAX_PROG_FULL_NAME];
 809	unsigned int id = 0;
 810	int fd, nb_fds = 0;
 811	void *tmp;
 812	int err;
 813
 814	while (true) {
 815		struct bpf_prog_info info = {};
 816		__u32 len = sizeof(info);
 817
 818		err = bpf_prog_get_next_id(id, &id);
 819		if (err) {
 820			if (errno != ENOENT) {
 821				p_err("%s", strerror(errno));
 822				goto err_close_fds;
 823			}
 824			return nb_fds;
 825		}
 826
 827		fd = bpf_prog_get_fd_by_id(id);
 828		if (fd < 0) {
 829			p_err("can't get prog by id (%u): %s",
 830			      id, strerror(errno));
 831			goto err_close_fds;
 832		}
 833
 834		err = bpf_prog_get_info_by_fd(fd, &info, &len);
 835		if (err) {
 836			p_err("can't get prog info (%u): %s",
 837			      id, strerror(errno));
 838			goto err_close_fd;
 839		}
 840
 841		if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
 842			close(fd);
 843			continue;
 844		}
 845
 846		if (!tag) {
 847			get_prog_full_name(&info, fd, prog_name,
 848					   sizeof(prog_name));
 849			if (strncmp(nametag, prog_name, sizeof(prog_name))) {
 850				close(fd);
 851				continue;
 852			}
 853		}
 854
 855		if (nb_fds > 0) {
 856			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
 857			if (!tmp) {
 858				p_err("failed to realloc");
 859				goto err_close_fd;
 860			}
 861			*fds = tmp;
 862		}
 863		(*fds)[nb_fds++] = fd;
 864	}
 865
 866err_close_fd:
 867	close(fd);
 868err_close_fds:
 869	while (--nb_fds >= 0)
 870		close((*fds)[nb_fds]);
 871	return -1;
 872}
 873
 874int prog_parse_fds(int *argc, char ***argv, int **fds)
 875{
 876	if (is_prefix(**argv, "id")) {
 877		unsigned int id;
 878		char *endptr;
 879
 880		NEXT_ARGP();
 881
 882		id = strtoul(**argv, &endptr, 0);
 883		if (*endptr) {
 884			p_err("can't parse %s as ID", **argv);
 885			return -1;
 886		}
 887		NEXT_ARGP();
 888
 889		(*fds)[0] = bpf_prog_get_fd_by_id(id);
 890		if ((*fds)[0] < 0) {
 891			p_err("get by id (%u): %s", id, strerror(errno));
 892			return -1;
 893		}
 894		return 1;
 895	} else if (is_prefix(**argv, "tag")) {
 896		unsigned char tag[BPF_TAG_SIZE];
 897
 898		NEXT_ARGP();
 899
 900		if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
 901			   tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
 902		    != BPF_TAG_SIZE) {
 903			p_err("can't parse tag");
 904			return -1;
 905		}
 906		NEXT_ARGP();
 907
 908		return prog_fd_by_nametag(tag, fds, true);
 909	} else if (is_prefix(**argv, "name")) {
 910		char *name;
 911
 912		NEXT_ARGP();
 913
 914		name = **argv;
 915		if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
 916			p_err("can't parse name");
 917			return -1;
 918		}
 919		NEXT_ARGP();
 920
 921		return prog_fd_by_nametag(name, fds, false);
 922	} else if (is_prefix(**argv, "pinned")) {
 923		char *path;
 924
 925		NEXT_ARGP();
 926
 927		path = **argv;
 928		NEXT_ARGP();
 929
 930		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
 931		if ((*fds)[0] < 0)
 932			return -1;
 933		return 1;
 934	}
 935
 936	p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
 937	return -1;
 938}
 939
 940int prog_parse_fd(int *argc, char ***argv)
 941{
 942	int *fds = NULL;
 943	int nb_fds, fd;
 944
 945	fds = malloc(sizeof(int));
 946	if (!fds) {
 947		p_err("mem alloc failed");
 948		return -1;
 949	}
 950	nb_fds = prog_parse_fds(argc, argv, &fds);
 951	if (nb_fds != 1) {
 952		if (nb_fds > 1) {
 953			p_err("several programs match this handle");
 954			while (nb_fds--)
 955				close(fds[nb_fds]);
 956		}
 957		fd = -1;
 958		goto exit_free;
 959	}
 960
 961	fd = fds[0];
 962exit_free:
 963	free(fds);
 964	return fd;
 965}
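
/*
 * [Editor's sketch, not part of the upstream file] A command handler hands
 * its remaining argc/argv to prog_parse_fd(), which consumes the
 * "id|tag|name|pinned ..." handle and returns exactly one fd (or -1, also
 * when several programs match):
 */
static int example_show_one_prog(int *argc, char ***argv)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	int fd, err;

	fd = prog_parse_fd(argc, argv);
	if (fd < 0)
		return -1;

	err = bpf_prog_get_info_by_fd(fd, &info, &len);
	if (!err)
		printf("prog id %u, type %u\n", info.id, info.type);
	close(fd);
	return err;
}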
 966
 967static int map_fd_by_name(char *name, int **fds)
 968{
 969	unsigned int id = 0;
 970	int fd, nb_fds = 0;
 971	void *tmp;
 972	int err;
 973
 974	while (true) {
 975		struct bpf_map_info info = {};
 976		__u32 len = sizeof(info);
 977
 978		err = bpf_map_get_next_id(id, &id);
 979		if (err) {
 980			if (errno != ENOENT) {
 981				p_err("%s", strerror(errno));
 982				goto err_close_fds;
 983			}
 984			return nb_fds;
 985		}
 986
 987		fd = bpf_map_get_fd_by_id(id);
 988		if (fd < 0) {
 989			p_err("can't get map by id (%u): %s",
 990			      id, strerror(errno));
 991			goto err_close_fds;
 992		}
 993
 994		err = bpf_map_get_info_by_fd(fd, &info, &len);
 995		if (err) {
 996			p_err("can't get map info (%u): %s",
 997			      id, strerror(errno));
 998			goto err_close_fd;
 999		}
1000
1001		if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
1002			close(fd);
1003			continue;
1004		}
1005
1006		if (nb_fds > 0) {
1007			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
1008			if (!tmp) {
1009				p_err("failed to realloc");
1010				goto err_close_fd;
1011			}
1012			*fds = tmp;
1013		}
1014		(*fds)[nb_fds++] = fd;
1015	}
1016
1017err_close_fd:
1018	close(fd);
1019err_close_fds:
1020	while (--nb_fds >= 0)
1021		close((*fds)[nb_fds]);
1022	return -1;
1023}
1024
1025int map_parse_fds(int *argc, char ***argv, int **fds)
1026{
1027	if (is_prefix(**argv, "id")) {
1028		unsigned int id;
1029		char *endptr;
1030
1031		NEXT_ARGP();
1032
1033		id = strtoul(**argv, &endptr, 0);
1034		if (*endptr) {
1035			p_err("can't parse %s as ID", **argv);
1036			return -1;
1037		}
1038		NEXT_ARGP();
1039
1040		(*fds)[0] = bpf_map_get_fd_by_id(id);
1041		if ((*fds)[0] < 0) {
1042			p_err("get map by id (%u): %s", id, strerror(errno));
1043			return -1;
1044		}
1045		return 1;
1046	} else if (is_prefix(**argv, "name")) {
1047		char *name;
1048
1049		NEXT_ARGP();
1050
1051		name = **argv;
1052		if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
1053			p_err("can't parse name");
1054			return -1;
1055		}
1056		NEXT_ARGP();
1057
1058		return map_fd_by_name(name, fds);
1059	} else if (is_prefix(**argv, "pinned")) {
1060		char *path;
1061
1062		NEXT_ARGP();
1063
1064		path = **argv;
1065		NEXT_ARGP();
1066
1067		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
1068		if ((*fds)[0] < 0)
1069			return -1;
1070		return 1;
1071	}
1072
1073	p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
1074	return -1;
1075}
1076
1077int map_parse_fd(int *argc, char ***argv)
1078{
1079	int *fds = NULL;
1080	int nb_fds, fd;
1081
1082	fds = malloc(sizeof(int));
1083	if (!fds) {
1084		p_err("mem alloc failed");
1085		return -1;
1086	}
1087	nb_fds = map_parse_fds(argc, argv, &fds);
1088	if (nb_fds != 1) {
1089		if (nb_fds > 1) {
1090			p_err("several maps match this handle");
1091			while (nb_fds--)
1092				close(fds[nb_fds]);
1093		}
1094		fd = -1;
1095		goto exit_free;
1096	}
1097
1098	fd = fds[0];
1099exit_free:
1100	free(fds);
1101	return fd;
1102}
1103
1104int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
1105			  __u32 *info_len)
1106{
1107	int err;
1108	int fd;
1109
1110	fd = map_parse_fd(argc, argv);
1111	if (fd < 0)
1112		return -1;
1113
1114	err = bpf_map_get_info_by_fd(fd, info, info_len);
1115	if (err) {
1116		p_err("can't get map info: %s", strerror(errno));
1117		close(fd);
1118		return err;
1119	}
1120
1121	return fd;
1122}
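
/*
 * [Editor's sketch, not part of the upstream file] map_parse_fd_and_info()
 * resolves a map handle and fills in its bpf_map_info in one step, so the
 * caller can validate key/value sizes before touching the map. "value" must
 * point to at least info.value_size bytes.
 */
static int example_lookup_u32_key(int *argc, char ***argv, __u32 key, void *value)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int fd, err;

	fd = map_parse_fd_and_info(argc, argv, &info, &len);
	if (fd < 0)
		return -1;

	if (info.key_size != sizeof(key)) {
		p_err("map has %u-byte keys, expected %zu",
		      info.key_size, sizeof(key));
		close(fd);
		return -1;
	}

	err = bpf_map_lookup_elem(fd, &key, value);
	close(fd);
	return err;
}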
1123
1124size_t hash_fn_for_key_as_id(long key, void *ctx)
1125{
1126	return key;
1127}
1128
1129bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
1130{
1131	return k1 == k2;
1132}
1133
1134const char *bpf_attach_type_input_str(enum bpf_attach_type t)
1135{
1136	switch (t) {
1137	case BPF_CGROUP_INET_INGRESS:		return "ingress";
1138	case BPF_CGROUP_INET_EGRESS:		return "egress";
1139	case BPF_CGROUP_INET_SOCK_CREATE:	return "sock_create";
1140	case BPF_CGROUP_INET_SOCK_RELEASE:	return "sock_release";
1141	case BPF_CGROUP_SOCK_OPS:		return "sock_ops";
1142	case BPF_CGROUP_DEVICE:			return "device";
1143	case BPF_CGROUP_INET4_BIND:		return "bind4";
1144	case BPF_CGROUP_INET6_BIND:		return "bind6";
1145	case BPF_CGROUP_INET4_CONNECT:		return "connect4";
1146	case BPF_CGROUP_INET6_CONNECT:		return "connect6";
1147	case BPF_CGROUP_INET4_POST_BIND:	return "post_bind4";
1148	case BPF_CGROUP_INET6_POST_BIND:	return "post_bind6";
1149	case BPF_CGROUP_INET4_GETPEERNAME:	return "getpeername4";
1150	case BPF_CGROUP_INET6_GETPEERNAME:	return "getpeername6";
1151	case BPF_CGROUP_INET4_GETSOCKNAME:	return "getsockname4";
1152	case BPF_CGROUP_INET6_GETSOCKNAME:	return "getsockname6";
1153	case BPF_CGROUP_UDP4_SENDMSG:		return "sendmsg4";
1154	case BPF_CGROUP_UDP6_SENDMSG:		return "sendmsg6";
1155	case BPF_CGROUP_SYSCTL:			return "sysctl";
1156	case BPF_CGROUP_UDP4_RECVMSG:		return "recvmsg4";
1157	case BPF_CGROUP_UDP6_RECVMSG:		return "recvmsg6";
1158	case BPF_CGROUP_GETSOCKOPT:		return "getsockopt";
1159	case BPF_CGROUP_SETSOCKOPT:		return "setsockopt";
1160	case BPF_TRACE_RAW_TP:			return "raw_tp";
1161	case BPF_TRACE_FENTRY:			return "fentry";
1162	case BPF_TRACE_FEXIT:			return "fexit";
1163	case BPF_MODIFY_RETURN:			return "mod_ret";
1164	case BPF_SK_REUSEPORT_SELECT:		return "sk_skb_reuseport_select";
1165	case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:	return "sk_skb_reuseport_select_or_migrate";
1166	default:	return libbpf_bpf_attach_type_str(t);
1167	}
1168}
1169
1170int pathname_concat(char *buf, int buf_sz, const char *path,
1171		    const char *name)
1172{
1173	int len;
1174
1175	len = snprintf(buf, buf_sz, "%s/%s", path, name);
1176	if (len < 0)
1177		return -EINVAL;
1178	if (len >= buf_sz)
1179		return -ENAMETOOLONG;
1180
1181	return 0;
1182}
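
/*
 * [Editor's sketch, not part of the upstream file] pathname_concat() is a
 * bounds-checked "dir/name" join; combined with do_pin_fd() it pins an
 * object under a chosen bpffs directory:
 */
static int example_pin_under_dir(int fd, const char *dir, const char *name)
{
	char path[PATH_MAX];
	int err;

	err = pathname_concat(path, sizeof(path), dir, name);
	if (err)
		return err;	/* -EINVAL or -ENAMETOOLONG */

	return do_pin_fd(fd, path);
}
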
tools/bpf/bpftool/common.c (Linux v6.2)
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
   3
   4#ifndef _GNU_SOURCE
   5#define _GNU_SOURCE
   6#endif
   7#include <ctype.h>
   8#include <errno.h>
   9#include <fcntl.h>
  10#include <ftw.h>
  11#include <libgen.h>
  12#include <mntent.h>
  13#include <stdbool.h>
  14#include <stdio.h>
  15#include <stdlib.h>
  16#include <string.h>
  17#include <unistd.h>
  18#include <net/if.h>
  19#include <sys/mount.h>
  20#include <sys/resource.h>
  21#include <sys/stat.h>
  22#include <sys/vfs.h>
  23
  24#include <linux/filter.h>
  25#include <linux/limits.h>
  26#include <linux/magic.h>
  27#include <linux/unistd.h>
  28
  29#include <bpf/bpf.h>
  30#include <bpf/hashmap.h>
  31#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
  32#include <bpf/btf.h>
  33
  34#include "main.h"
  35
  36#ifndef BPF_FS_MAGIC
  37#define BPF_FS_MAGIC		0xcafe4a11
  38#endif
  39
  40void p_err(const char *fmt, ...)
  41{
  42	va_list ap;
  43
  44	va_start(ap, fmt);
  45	if (json_output) {
  46		jsonw_start_object(json_wtr);
  47		jsonw_name(json_wtr, "error");
  48		jsonw_vprintf_enquote(json_wtr, fmt, ap);
  49		jsonw_end_object(json_wtr);
  50	} else {
  51		fprintf(stderr, "Error: ");
  52		vfprintf(stderr, fmt, ap);
  53		fprintf(stderr, "\n");
  54	}
  55	va_end(ap);
  56}
  57
  58void p_info(const char *fmt, ...)
  59{
  60	va_list ap;
  61
  62	if (json_output)
  63		return;
  64
  65	va_start(ap, fmt);
  66	vfprintf(stderr, fmt, ap);
  67	fprintf(stderr, "\n");
  68	va_end(ap);
  69}
  70
  71static bool is_bpffs(char *path)
  72{
  73	struct statfs st_fs;
  74
  75	if (statfs(path, &st_fs) < 0)
  76		return false;
  77
  78	return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
  79}
  80
  81/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
  82 * memcg-based memory accounting for BPF maps and programs. This was done in
  83 * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
  84 * accounting'"), in Linux 5.11.
  85 *
  86 * Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
  87 * so by checking for the availability of a given BPF helper and this has
  88 * failed on some kernels with backports in the past, see commit 6b4384ff1088
  89 * ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
  90 * Instead, we can probe by lowering the process-based rlimit to 0, trying to
  91 * load a BPF object, and resetting the rlimit. If the load succeeds then
  92 * memcg-based accounting is supported.
  93 *
  94 * This would be too dangerous to do in the library, because multithreaded
  95 * applications might attempt to load items while the rlimit is at 0. Given
  96 * that bpftool is single-threaded, this is fine to do here.
  97 */
  98static bool known_to_need_rlimit(void)
  99{
 100	struct rlimit rlim_init, rlim_cur_zero = {};
 101	struct bpf_insn insns[] = {
 102		BPF_MOV64_IMM(BPF_REG_0, 0),
 103		BPF_EXIT_INSN(),
 104	};
 105	size_t insn_cnt = ARRAY_SIZE(insns);
 106	union bpf_attr attr;
 107	int prog_fd, err;
 108
 109	memset(&attr, 0, sizeof(attr));
 110	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 111	attr.insns = ptr_to_u64(insns);
 112	attr.insn_cnt = insn_cnt;
 113	attr.license = ptr_to_u64("GPL");
 114
 115	if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
 116		return false;
 117
 118	/* Drop the soft limit to zero. We maintain the hard limit to its
 119	 * current value, because lowering it would be a permanent operation
 120	 * for unprivileged users.
 121	 */
 122	rlim_cur_zero.rlim_max = rlim_init.rlim_max;
 123	if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
 124		return false;
 125
 126	/* Do not use bpf_prog_load() from libbpf here, because it calls
 127	 * bump_rlimit_memlock(), interfering with the current probe.
 128	 */
 129	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 130	err = errno;
 131
 132	/* reset soft rlimit to its initial value */
 133	setrlimit(RLIMIT_MEMLOCK, &rlim_init);
 134
 135	if (prog_fd < 0)
 136		return err == EPERM;
 137
 138	close(prog_fd);
 139	return false;
 140}
 141
 142void set_max_rlimit(void)
 143{
 144	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
 145
 146	if (known_to_need_rlimit())
 147		setrlimit(RLIMIT_MEMLOCK, &rinf);
 148}
 149
 150static int
 151mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
 152{
 153	bool bind_done = false;
 154
 155	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
 156		if (errno != EINVAL || bind_done) {
 157			snprintf(buff, bufflen,
 158				 "mount --make-private %s failed: %s",
 159				 target, strerror(errno));
 160			return -1;
 161		}
 162
 163		if (mount(target, target, "none", MS_BIND, NULL)) {
 164			snprintf(buff, bufflen,
 165				 "mount --bind %s %s failed: %s",
 166				 target, target, strerror(errno));
 167			return -1;
 168		}
 169
 170		bind_done = true;
 171	}
 172
 173	if (mount(type, target, type, 0, "mode=0700")) {
 174		snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
 175			 type, type, target, strerror(errno));
 176		return -1;
 177	}
 178
 179	return 0;
 180}
 181
 182int mount_tracefs(const char *target)
 183{
 184	char err_str[ERR_MAX_LEN];
 185	int err;
 186
 187	err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
 188	if (err) {
 189		err_str[ERR_MAX_LEN - 1] = '\0';
 190		p_err("can't mount tracefs: %s", err_str);
 191	}
 192
 193	return err;
 194}
 195
 196int open_obj_pinned(const char *path, bool quiet)
 197{
 198	char *pname;
 199	int fd = -1;
 200
 201	pname = strdup(path);
 202	if (!pname) {
 203		if (!quiet)
 204			p_err("mem alloc failed");
 205		goto out_ret;
 206	}
 207
 208	fd = bpf_obj_get(pname);
 209	if (fd < 0) {
 210		if (!quiet)
 211			p_err("bpf obj get (%s): %s", pname,
 212			      errno == EACCES && !is_bpffs(dirname(pname)) ?
 213			    "directory not in bpf file system (bpffs)" :
 214			    strerror(errno));
 215		goto out_free;
 216	}
 217
 218out_free:
 219	free(pname);
 220out_ret:
 221	return fd;
 222}
 223
 224int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
 225{
 226	enum bpf_obj_type type;
 227	int fd;
 228
 229	fd = open_obj_pinned(path, false);
 230	if (fd < 0)
 231		return -1;
 232
 233	type = get_fd_type(fd);
 234	if (type < 0) {
 235		close(fd);
 236		return type;
 237	}
 238	if (type != exp_type) {
 239		p_err("incorrect object type: %s", get_fd_type_name(type));
 240		close(fd);
 241		return -1;
 242	}
 243
 244	return fd;
 245}
 246
 247int mount_bpffs_for_pin(const char *name)
 248{
 249	char err_str[ERR_MAX_LEN];
 250	char *file;
 251	char *dir;
 252	int err = 0;
 253
 254	file = malloc(strlen(name) + 1);
 255	if (!file) {
 256		p_err("mem alloc failed");
 257		return -1;
 258	}
 259
 260	strcpy(file, name);
 261	dir = dirname(file);
 262
 263	if (is_bpffs(dir))
 264		/* nothing to do if already mounted */
 265		goto out_free;
 266
 267	if (block_mount) {
 268		p_err("no BPF file system found, not mounting it due to --nomount option");
 269		err = -1;
 270		goto out_free;
 271	}
 272
 273	err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
 274	if (err) {
 275		err_str[ERR_MAX_LEN - 1] = '\0';
 276		p_err("can't mount BPF file system to pin the object (%s): %s",
 277		      name, err_str);
 278	}
 279
 280out_free:
 281	free(file);
 282	return err;
 283}
 284
 285int do_pin_fd(int fd, const char *name)
 286{
 287	int err;
 288
 289	err = mount_bpffs_for_pin(name);
 290	if (err)
 291		return err;
 292
 293	err = bpf_obj_pin(fd, name);
 294	if (err)
 295		p_err("can't pin the object (%s): %s", name, strerror(errno));
 296
 297	return err;
 298}
 299
 300int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
 301{
 302	int err;
 303	int fd;
 304
 305	if (!REQ_ARGS(3))
 306		return -EINVAL;
 307
 308	fd = get_fd(&argc, &argv);
 309	if (fd < 0)
 310		return fd;
 311
 312	err = do_pin_fd(fd, *argv);
 313
 314	close(fd);
 315	return err;
 316}
 317
 318const char *get_fd_type_name(enum bpf_obj_type type)
 319{
 320	static const char * const names[] = {
 321		[BPF_OBJ_UNKNOWN]	= "unknown",
 322		[BPF_OBJ_PROG]		= "prog",
 323		[BPF_OBJ_MAP]		= "map",
 324		[BPF_OBJ_LINK]		= "link",
 325	};
 326
 327	if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
 328		return names[BPF_OBJ_UNKNOWN];
 329
 330	return names[type];
 331}
 332
 333void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
 334			char *name_buff, size_t buff_len)
 335{
 336	const char *prog_name = prog_info->name;
 337	const struct btf_type *func_type;
 338	const struct bpf_func_info finfo = {};
 339	struct bpf_prog_info info = {};
 340	__u32 info_len = sizeof(info);
 341	struct btf *prog_btf = NULL;
 342
 343	if (buff_len <= BPF_OBJ_NAME_LEN ||
 344	    strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
 345		goto copy_name;
 346
 347	if (!prog_info->btf_id || prog_info->nr_func_info == 0)
 348		goto copy_name;
 349
 350	info.nr_func_info = 1;
 351	info.func_info_rec_size = prog_info->func_info_rec_size;
 352	if (info.func_info_rec_size > sizeof(finfo))
 353		info.func_info_rec_size = sizeof(finfo);
 354	info.func_info = ptr_to_u64(&finfo);
 355
 356	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
 357		goto copy_name;
 358
 359	prog_btf = btf__load_from_kernel_by_id(info.btf_id);
 360	if (!prog_btf)
 361		goto copy_name;
 362
 363	func_type = btf__type_by_id(prog_btf, finfo.type_id);
 364	if (!func_type || !btf_is_func(func_type))
 365		goto copy_name;
 366
 367	prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
 368
 369copy_name:
 370	snprintf(name_buff, buff_len, "%s", prog_name);
 371
 372	if (prog_btf)
 373		btf__free(prog_btf);
 374}
 375
 376int get_fd_type(int fd)
 377{
 378	char path[PATH_MAX];
 379	char buf[512];
 380	ssize_t n;
 381
 382	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 383
 384	n = readlink(path, buf, sizeof(buf));
 385	if (n < 0) {
 386		p_err("can't read link type: %s", strerror(errno));
 387		return -1;
 388	}
 389	if (n == sizeof(path)) {
 390		p_err("can't read link type: path too long!");
 391		return -1;
 392	}
 393
 394	if (strstr(buf, "bpf-map"))
 395		return BPF_OBJ_MAP;
 396	else if (strstr(buf, "bpf-prog"))
 397		return BPF_OBJ_PROG;
 398	else if (strstr(buf, "bpf-link"))
 399		return BPF_OBJ_LINK;
 400
 401	return BPF_OBJ_UNKNOWN;
 402}
 403
 404char *get_fdinfo(int fd, const char *key)
 405{
 406	char path[PATH_MAX];
 407	char *line = NULL;
 408	size_t line_n = 0;
 409	ssize_t n;
 410	FILE *fdi;
 411
 412	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
 413
 414	fdi = fopen(path, "r");
 415	if (!fdi)
 416		return NULL;
 417
 418	while ((n = getline(&line, &line_n, fdi)) > 0) {
 419		char *value;
 420		int len;
 421
 422		if (!strstr(line, key))
 423			continue;
 424
 425		fclose(fdi);
 426
 427		value = strchr(line, '\t');
 428		if (!value || !value[1]) {
 429			free(line);
 430			return NULL;
 431		}
 432		value++;
 433
 434		len = strlen(value);
 435		memmove(line, value, len);
 436		line[len - 1] = '\0';
 437
 438		return line;
 439	}
 440
 441	free(line);
 442	fclose(fdi);
 443	return NULL;
 444}
 445
 446void print_data_json(uint8_t *data, size_t len)
 447{
 448	unsigned int i;
 449
 450	jsonw_start_array(json_wtr);
 451	for (i = 0; i < len; i++)
 452		jsonw_printf(json_wtr, "%d", data[i]);
 453	jsonw_end_array(json_wtr);
 454}
 455
 456void print_hex_data_json(uint8_t *data, size_t len)
 457{
 458	unsigned int i;
 459
 460	jsonw_start_array(json_wtr);
 461	for (i = 0; i < len; i++)
 462		jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
 463	jsonw_end_array(json_wtr);
 464}
 465
 466/* extra params for nftw cb */
 467static struct hashmap *build_fn_table;
 468static enum bpf_obj_type build_fn_type;
 469
 470static int do_build_table_cb(const char *fpath, const struct stat *sb,
 471			     int typeflag, struct FTW *ftwbuf)
 472{
 473	struct bpf_prog_info pinned_info;
 474	__u32 len = sizeof(pinned_info);
 475	enum bpf_obj_type objtype;
 476	int fd, err = 0;
 477	char *path;
 478
 479	if (typeflag != FTW_F)
 480		goto out_ret;
 481
 482	fd = open_obj_pinned(fpath, true);
 483	if (fd < 0)
 484		goto out_ret;
 485
 486	objtype = get_fd_type(fd);
 487	if (objtype != build_fn_type)
 488		goto out_close;
 489
 490	memset(&pinned_info, 0, sizeof(pinned_info));
 491	if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len))
 492		goto out_close;
 493
 494	path = strdup(fpath);
 495	if (!path) {
 496		err = -1;
 497		goto out_close;
 498	}
 499
 500	err = hashmap__append(build_fn_table, pinned_info.id, path);
 501	if (err) {
 502		p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
 503		      pinned_info.id, path, strerror(errno));
 504		free(path);
 505		goto out_close;
 506	}
 507
 508out_close:
 509	close(fd);
 510out_ret:
 511	return err;
 512}
 513
 514int build_pinned_obj_table(struct hashmap *tab,
 515			   enum bpf_obj_type type)
 516{
 517	struct mntent *mntent = NULL;
 518	FILE *mntfile = NULL;
 519	int flags = FTW_PHYS;
 520	int nopenfd = 16;
 521	int err = 0;
 522
 523	mntfile = setmntent("/proc/mounts", "r");
 524	if (!mntfile)
 525		return -1;
 526
 527	build_fn_table = tab;
 528	build_fn_type = type;
 529
 530	while ((mntent = getmntent(mntfile))) {
 531		char *path = mntent->mnt_dir;
 532
 533		if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
 534			continue;
 535		err = nftw(path, do_build_table_cb, nopenfd, flags);
 536		if (err)
 537			break;
 538	}
 539	fclose(mntfile);
 540	return err;
 541}
 542
 543void delete_pinned_obj_table(struct hashmap *map)
 544{
 545	struct hashmap_entry *entry;
 546	size_t bkt;
 547
 548	if (!map)
 549		return;
 550
 551	hashmap__for_each_entry(map, entry, bkt)
 552		free(entry->pvalue);
 553
 554	hashmap__free(map);
 555}
 556
 557unsigned int get_page_size(void)
 558{
 559	static int result;
 560
 561	if (!result)
 562		result = getpagesize();
 563	return result;
 564}
 565
 566unsigned int get_possible_cpus(void)
 567{
 568	int cpus = libbpf_num_possible_cpus();
 569
 570	if (cpus < 0) {
 571		p_err("Can't get # of possible cpus: %s", strerror(-cpus));
 572		exit(-1);
 573	}
 574	return cpus;
 575}
 576
 577static char *
 578ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
 579{
 580	struct stat st;
 581	int err;
 582
 583	err = stat("/proc/self/ns/net", &st);
 584	if (err) {
 585		p_err("Can't stat /proc/self: %s", strerror(errno));
 586		return NULL;
 587	}
 588
 589	if (st.st_dev != ns_dev || st.st_ino != ns_ino)
 590		return NULL;
 591
 592	return if_indextoname(ifindex, buf);
 593}
 594
 595static int read_sysfs_hex_int(char *path)
 596{
 597	char vendor_id_buf[8];
 598	int len;
 599	int fd;
 600
 601	fd = open(path, O_RDONLY);
 602	if (fd < 0) {
 603		p_err("Can't open %s: %s", path, strerror(errno));
 604		return -1;
 605	}
 606
 607	len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
 608	close(fd);
 609	if (len < 0) {
 610		p_err("Can't read %s: %s", path, strerror(errno));
 611		return -1;
 612	}
 613	if (len >= (int)sizeof(vendor_id_buf)) {
 614		p_err("Value in %s too long", path);
 615		return -1;
 616	}
 617
 618	vendor_id_buf[len] = 0;
 619
 620	return strtol(vendor_id_buf, NULL, 0);
 621}
 622
 623static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
 624{
 625	char full_path[64];
 626
 627	snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
 628		 devname, entry_name);
 629
 630	return read_sysfs_hex_int(full_path);
 631}
 632
 633const char *
 634ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
 635{
 636	__maybe_unused int device_id;
 637	char devname[IF_NAMESIZE];
 638	int vendor_id;
 639
 640	if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
 641		p_err("Can't get net device name for ifindex %d: %s", ifindex,
 642		      strerror(errno));
 643		return NULL;
 644	}
 645
 646	vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
 647	if (vendor_id < 0) {
 648		p_err("Can't get device vendor id for %s", devname);
 649		return NULL;
 650	}
 651
 652	switch (vendor_id) {
 653#ifdef HAVE_LIBBFD_SUPPORT
 654	case 0x19ee:
 655		device_id = read_sysfs_netdev_hex_int(devname, "device");
 656		if (device_id != 0x4000 &&
 657		    device_id != 0x6000 &&
 658		    device_id != 0x6003)
 659			p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
 660		*opt = "ctx4";
 661		return "NFP-6xxx";
 662#endif /* HAVE_LIBBFD_SUPPORT */
 663	/* No NFP support in LLVM, we have no valid triple to return. */
 664	default:
 665		p_err("Can't get arch name for device vendor id 0x%04x",
 666		      vendor_id);
 667		return NULL;
 668	}
 669}
 670
 671void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
 672{
 673	char name[IF_NAMESIZE];
 674
 675	if (!ifindex)
 676		return;
 677
 678	printf("  offloaded_to ");
 679	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
 680		printf("%s", name);
 681	else
 682		printf("ifindex %u ns_dev %llu ns_ino %llu",
 683		       ifindex, ns_dev, ns_inode);
 684}
 685
 686void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
 687{
 688	char name[IF_NAMESIZE];
 689
 690	if (!ifindex)
 691		return;
 692
 693	jsonw_name(json_wtr, "dev");
 694	jsonw_start_object(json_wtr);
 695	jsonw_uint_field(json_wtr, "ifindex", ifindex);
 696	jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
 697	jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
 698	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
 699		jsonw_string_field(json_wtr, "ifname", name);
 700	jsonw_end_object(json_wtr);
 701}
 702
 703int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
 704{
 705	char *endptr;
 706
 707	NEXT_ARGP();
 708
 709	if (*val) {
 710		p_err("%s already specified", what);
 711		return -1;
 712	}
 713
 714	*val = strtoul(**argv, &endptr, 0);
 715	if (*endptr) {
 716		p_err("can't parse %s as %s", **argv, what);
 717		return -1;
 718	}
 719	NEXT_ARGP();
 720
 721	return 0;
 722}
 723
 724int __printf(2, 0)
 725print_all_levels(__maybe_unused enum libbpf_print_level level,
 726		 const char *format, va_list args)
 727{
 728	return vfprintf(stderr, format, args);
 729}
 730
 731static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
 732{
 733	char prog_name[MAX_PROG_FULL_NAME];
 734	unsigned int id = 0;
 735	int fd, nb_fds = 0;
 736	void *tmp;
 737	int err;
 738
 739	while (true) {
 740		struct bpf_prog_info info = {};
 741		__u32 len = sizeof(info);
 742
 743		err = bpf_prog_get_next_id(id, &id);
 744		if (err) {
 745			if (errno != ENOENT) {
 746				p_err("%s", strerror(errno));
 747				goto err_close_fds;
 748			}
 749			return nb_fds;
 750		}
 751
 752		fd = bpf_prog_get_fd_by_id(id);
 753		if (fd < 0) {
 754			p_err("can't get prog by id (%u): %s",
 755			      id, strerror(errno));
 756			goto err_close_fds;
 757		}
 758
 759		err = bpf_obj_get_info_by_fd(fd, &info, &len);
 760		if (err) {
 761			p_err("can't get prog info (%u): %s",
 762			      id, strerror(errno));
 763			goto err_close_fd;
 764		}
 765
 766		if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
 767			close(fd);
 768			continue;
 769		}
 770
 771		if (!tag) {
 772			get_prog_full_name(&info, fd, prog_name,
 773					   sizeof(prog_name));
 774			if (strncmp(nametag, prog_name, sizeof(prog_name))) {
 775				close(fd);
 776				continue;
 777			}
 778		}
 779
 780		if (nb_fds > 0) {
 781			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
 782			if (!tmp) {
 783				p_err("failed to realloc");
 784				goto err_close_fd;
 785			}
 786			*fds = tmp;
 787		}
 788		(*fds)[nb_fds++] = fd;
 789	}
 790
 791err_close_fd:
 792	close(fd);
 793err_close_fds:
 794	while (--nb_fds >= 0)
 795		close((*fds)[nb_fds]);
 796	return -1;
 797}
 798
 799int prog_parse_fds(int *argc, char ***argv, int **fds)
 800{
 801	if (is_prefix(**argv, "id")) {
 802		unsigned int id;
 803		char *endptr;
 804
 805		NEXT_ARGP();
 806
 807		id = strtoul(**argv, &endptr, 0);
 808		if (*endptr) {
 809			p_err("can't parse %s as ID", **argv);
 810			return -1;
 811		}
 812		NEXT_ARGP();
 813
 814		(*fds)[0] = bpf_prog_get_fd_by_id(id);
 815		if ((*fds)[0] < 0) {
 816			p_err("get by id (%u): %s", id, strerror(errno));
 817			return -1;
 818		}
 819		return 1;
 820	} else if (is_prefix(**argv, "tag")) {
 821		unsigned char tag[BPF_TAG_SIZE];
 822
 823		NEXT_ARGP();
 824
 825		if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
 826			   tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
 827		    != BPF_TAG_SIZE) {
 828			p_err("can't parse tag");
 829			return -1;
 830		}
 831		NEXT_ARGP();
 832
 833		return prog_fd_by_nametag(tag, fds, true);
 834	} else if (is_prefix(**argv, "name")) {
 835		char *name;
 836
 837		NEXT_ARGP();
 838
 839		name = **argv;
 840		if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
 841			p_err("can't parse name");
 842			return -1;
 843		}
 844		NEXT_ARGP();
 845
 846		return prog_fd_by_nametag(name, fds, false);
 847	} else if (is_prefix(**argv, "pinned")) {
 848		char *path;
 849
 850		NEXT_ARGP();
 851
 852		path = **argv;
 853		NEXT_ARGP();
 854
 855		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
 856		if ((*fds)[0] < 0)
 857			return -1;
 858		return 1;
 859	}
 860
 861	p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
 862	return -1;
 863}
 864
 865int prog_parse_fd(int *argc, char ***argv)
 866{
 867	int *fds = NULL;
 868	int nb_fds, fd;
 869
 870	fds = malloc(sizeof(int));
 871	if (!fds) {
 872		p_err("mem alloc failed");
 873		return -1;
 874	}
 875	nb_fds = prog_parse_fds(argc, argv, &fds);
 876	if (nb_fds != 1) {
 877		if (nb_fds > 1) {
 878			p_err("several programs match this handle");
 879			while (nb_fds--)
 880				close(fds[nb_fds]);
 881		}
 882		fd = -1;
 883		goto exit_free;
 884	}
 885
 886	fd = fds[0];
 887exit_free:
 888	free(fds);
 889	return fd;
 890}
 891
 892static int map_fd_by_name(char *name, int **fds)
 893{
 894	unsigned int id = 0;
 895	int fd, nb_fds = 0;
 896	void *tmp;
 897	int err;
 898
 899	while (true) {
 900		struct bpf_map_info info = {};
 901		__u32 len = sizeof(info);
 902
 903		err = bpf_map_get_next_id(id, &id);
 904		if (err) {
 905			if (errno != ENOENT) {
 906				p_err("%s", strerror(errno));
 907				goto err_close_fds;
 908			}
 909			return nb_fds;
 910		}
 911
 912		fd = bpf_map_get_fd_by_id(id);
 913		if (fd < 0) {
 914			p_err("can't get map by id (%u): %s",
 915			      id, strerror(errno));
 916			goto err_close_fds;
 917		}
 918
 919		err = bpf_obj_get_info_by_fd(fd, &info, &len);
 920		if (err) {
 921			p_err("can't get map info (%u): %s",
 922			      id, strerror(errno));
 923			goto err_close_fd;
 924		}
 925
 926		if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
 927			close(fd);
 928			continue;
 929		}
 930
 931		if (nb_fds > 0) {
 932			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
 933			if (!tmp) {
 934				p_err("failed to realloc");
 935				goto err_close_fd;
 936			}
 937			*fds = tmp;
 938		}
 939		(*fds)[nb_fds++] = fd;
 940	}
 941
 942err_close_fd:
 943	close(fd);
 944err_close_fds:
 945	while (--nb_fds >= 0)
 946		close((*fds)[nb_fds]);
 947	return -1;
 948}
 949
 950int map_parse_fds(int *argc, char ***argv, int **fds)
 951{
 952	if (is_prefix(**argv, "id")) {
 953		unsigned int id;
 954		char *endptr;
 955
 956		NEXT_ARGP();
 957
 958		id = strtoul(**argv, &endptr, 0);
 959		if (*endptr) {
 960			p_err("can't parse %s as ID", **argv);
 961			return -1;
 962		}
 963		NEXT_ARGP();
 964
 965		(*fds)[0] = bpf_map_get_fd_by_id(id);
 966		if ((*fds)[0] < 0) {
 967			p_err("get map by id (%u): %s", id, strerror(errno));
 968			return -1;
 969		}
 970		return 1;
 971	} else if (is_prefix(**argv, "name")) {
 972		char *name;
 973
 974		NEXT_ARGP();
 975
 976		name = **argv;
 977		if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
 978			p_err("can't parse name");
 979			return -1;
 980		}
 981		NEXT_ARGP();
 982
 983		return map_fd_by_name(name, fds);
 984	} else if (is_prefix(**argv, "pinned")) {
 985		char *path;
 986
 987		NEXT_ARGP();
 988
 989		path = **argv;
 990		NEXT_ARGP();
 991
 992		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
 993		if ((*fds)[0] < 0)
 994			return -1;
 995		return 1;
 996	}
 997
 998	p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
 999	return -1;
1000}
1001
1002int map_parse_fd(int *argc, char ***argv)
1003{
1004	int *fds = NULL;
1005	int nb_fds, fd;
1006
1007	fds = malloc(sizeof(int));
1008	if (!fds) {
1009		p_err("mem alloc failed");
1010		return -1;
1011	}
1012	nb_fds = map_parse_fds(argc, argv, &fds);
1013	if (nb_fds != 1) {
1014		if (nb_fds > 1) {
1015			p_err("several maps match this handle");
1016			while (nb_fds--)
1017				close(fds[nb_fds]);
1018		}
1019		fd = -1;
1020		goto exit_free;
1021	}
1022
1023	fd = fds[0];
1024exit_free:
1025	free(fds);
1026	return fd;
1027}
1028
1029int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
1030{
1031	int err;
1032	int fd;
1033
1034	fd = map_parse_fd(argc, argv);
1035	if (fd < 0)
1036		return -1;
1037
1038	err = bpf_obj_get_info_by_fd(fd, info, info_len);
1039	if (err) {
1040		p_err("can't get map info: %s", strerror(errno));
1041		close(fd);
1042		return err;
1043	}
1044
1045	return fd;
1046}
1047
1048size_t hash_fn_for_key_as_id(long key, void *ctx)
1049{
1050	return key;
1051}
1052
1053bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
1054{
1055	return k1 == k2;
1056}
1057
1058const char *bpf_attach_type_input_str(enum bpf_attach_type t)
1059{
1060	switch (t) {
1061	case BPF_CGROUP_INET_INGRESS:		return "ingress";
1062	case BPF_CGROUP_INET_EGRESS:		return "egress";
1063	case BPF_CGROUP_INET_SOCK_CREATE:	return "sock_create";
1064	case BPF_CGROUP_INET_SOCK_RELEASE:	return "sock_release";
1065	case BPF_CGROUP_SOCK_OPS:		return "sock_ops";
1066	case BPF_CGROUP_DEVICE:			return "device";
1067	case BPF_CGROUP_INET4_BIND:		return "bind4";
1068	case BPF_CGROUP_INET6_BIND:		return "bind6";
1069	case BPF_CGROUP_INET4_CONNECT:		return "connect4";
1070	case BPF_CGROUP_INET6_CONNECT:		return "connect6";
1071	case BPF_CGROUP_INET4_POST_BIND:	return "post_bind4";
1072	case BPF_CGROUP_INET6_POST_BIND:	return "post_bind6";
1073	case BPF_CGROUP_INET4_GETPEERNAME:	return "getpeername4";
1074	case BPF_CGROUP_INET6_GETPEERNAME:	return "getpeername6";
1075	case BPF_CGROUP_INET4_GETSOCKNAME:	return "getsockname4";
1076	case BPF_CGROUP_INET6_GETSOCKNAME:	return "getsockname6";
1077	case BPF_CGROUP_UDP4_SENDMSG:		return "sendmsg4";
1078	case BPF_CGROUP_UDP6_SENDMSG:		return "sendmsg6";
1079	case BPF_CGROUP_SYSCTL:			return "sysctl";
1080	case BPF_CGROUP_UDP4_RECVMSG:		return "recvmsg4";
1081	case BPF_CGROUP_UDP6_RECVMSG:		return "recvmsg6";
1082	case BPF_CGROUP_GETSOCKOPT:		return "getsockopt";
1083	case BPF_CGROUP_SETSOCKOPT:		return "setsockopt";
1084	case BPF_TRACE_RAW_TP:			return "raw_tp";
1085	case BPF_TRACE_FENTRY:			return "fentry";
1086	case BPF_TRACE_FEXIT:			return "fexit";
1087	case BPF_MODIFY_RETURN:			return "mod_ret";
1088	case BPF_SK_REUSEPORT_SELECT:		return "sk_skb_reuseport_select";
1089	case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:	return "sk_skb_reuseport_select_or_migrate";
1090	default:	return libbpf_bpf_attach_type_str(t);
1091	}
1092}