   1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
   2
   3/*
   4 * Common eBPF ELF object loading operations.
   5 *
   6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
   7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
   8 * Copyright (C) 2015 Huawei Inc.
   9 * Copyright (C) 2017 Nicira, Inc.
  10 * Copyright (C) 2019 Isovalent, Inc.
  11 */
  12
  13#ifndef _GNU_SOURCE
  14#define _GNU_SOURCE
  15#endif
  16#include <stdlib.h>
  17#include <stdio.h>
  18#include <stdarg.h>
  19#include <libgen.h>
  20#include <inttypes.h>
  21#include <string.h>
  22#include <unistd.h>
  23#include <endian.h>
  24#include <fcntl.h>
  25#include <errno.h>
  26#include <asm/unistd.h>
  27#include <linux/err.h>
  28#include <linux/kernel.h>
  29#include <linux/bpf.h>
  30#include <linux/btf.h>
  31#include <linux/filter.h>
  32#include <linux/list.h>
  33#include <linux/limits.h>
  34#include <linux/perf_event.h>
  35#include <linux/ring_buffer.h>
  36#include <sys/epoll.h>
  37#include <sys/ioctl.h>
  38#include <sys/mman.h>
  39#include <sys/stat.h>
  40#include <sys/types.h>
  41#include <sys/vfs.h>
  42#include <sys/utsname.h>
  43#include <tools/libc_compat.h>
  44#include <libelf.h>
  45#include <gelf.h>
  46
  47#include "libbpf.h"
  48#include "bpf.h"
  49#include "btf.h"
  50#include "str_error.h"
  51#include "libbpf_internal.h"
  52#include "hashmap.h"
  53
  54#ifndef EM_BPF
  55#define EM_BPF 247
  56#endif
  57
  58#ifndef BPF_FS_MAGIC
  59#define BPF_FS_MAGIC		0xcafe4a11
  60#endif
  61
   62/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
   63 * compilation if the user enables the corresponding warning. Disable it explicitly.
  64 */
  65#pragma GCC diagnostic ignored "-Wformat-nonliteral"
  66
  67#define __printf(a, b)	__attribute__((format(printf, a, b)))
  68
  69static int __base_pr(enum libbpf_print_level level, const char *format,
  70		     va_list args)
  71{
  72	if (level == LIBBPF_DEBUG)
  73		return 0;
  74
  75	return vfprintf(stderr, format, args);
  76}
  77
  78static libbpf_print_fn_t __libbpf_pr = __base_pr;
  79
  80libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
  81{
  82	libbpf_print_fn_t old_print_fn = __libbpf_pr;
  83
  84	__libbpf_pr = fn;
  85	return old_print_fn;
  86}
  87
  88__printf(2, 3)
  89void libbpf_print(enum libbpf_print_level level, const char *format, ...)
  90{
  91	va_list args;
  92
  93	if (!__libbpf_pr)
  94		return;
  95
  96	va_start(args, format);
  97	__libbpf_pr(level, format, args);
  98	va_end(args);
  99}
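
     /*
      * Illustrative usage (caller code, not part of this file): install a
      * custom callback so even LIBBPF_DEBUG messages are printed;
      * libbpf_set_print() returns the previously installed callback.
      *
      *	static int my_pr(enum libbpf_print_level level,
      *			 const char *fmt, va_list args)
      *	{
      *		return vfprintf(stderr, fmt, args);
      *	}
      *
      *	libbpf_print_fn_t prev = libbpf_set_print(my_pr);
      */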
 100
 101#define STRERR_BUFSIZE  128
 102
 103#define CHECK_ERR(action, err, out) do {	\
 104	err = action;			\
 105	if (err)			\
 106		goto out;		\
  107} while (0)
 108
 109
 110/* Copied from tools/perf/util/util.h */
 111#ifndef zfree
 112# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
 113#endif
 114
 115#ifndef zclose
 116# define zclose(fd) ({			\
 117	int ___err = 0;			\
 118	if ((fd) >= 0)			\
 119		___err = close((fd));	\
 120	fd = -1;			\
 121	___err; })
 122#endif
 123
 124#ifdef HAVE_LIBELF_MMAP_SUPPORT
 125# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
 126#else
 127# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
 128#endif
 129
 130static inline __u64 ptr_to_u64(const void *ptr)
 131{
 132	return (__u64) (unsigned long) ptr;
 133}
 134
 135struct bpf_capabilities {
 136	/* v4.14: kernel support for program & map names. */
 137	__u32 name:1;
 138	/* v5.2: kernel support for global data sections. */
 139	__u32 global_data:1;
 140	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
 141	__u32 btf_func:1;
 142	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
 143	__u32 btf_datasec:1;
 144};
 145
 146/*
  147 * bpf_prog would be a better name, but it is already used in
  148 * linux/filter.h.
 149 */
 150struct bpf_program {
 151	/* Index in elf obj file, for relocation use. */
 152	int idx;
 153	char *name;
 154	int prog_ifindex;
 155	char *section_name;
 156	/* section_name with / replaced by _; makes recursive pinning
 157	 * in bpf_object__pin_programs easier
 158	 */
 159	char *pin_name;
 160	struct bpf_insn *insns;
 161	size_t insns_cnt, main_prog_cnt;
 162	enum bpf_prog_type type;
 163
 164	struct reloc_desc {
 165		enum {
 166			RELO_LD64,
 167			RELO_CALL,
 168			RELO_DATA,
 169		} type;
 170		int insn_idx;
 171		union {
 172			int map_idx;
 173			int text_off;
 174		};
 175	} *reloc_desc;
 176	int nr_reloc;
 177	int log_level;
 178
 179	struct {
 180		int nr;
 181		int *fds;
 182	} instances;
 183	bpf_program_prep_t preprocessor;
 184
 185	struct bpf_object *obj;
 186	void *priv;
 187	bpf_program_clear_priv_t clear_priv;
 188
 189	enum bpf_attach_type expected_attach_type;
 190	void *func_info;
 191	__u32 func_info_rec_size;
 192	__u32 func_info_cnt;
 193
 194	struct bpf_capabilities *caps;
 195
 196	void *line_info;
 197	__u32 line_info_rec_size;
 198	__u32 line_info_cnt;
 199	__u32 prog_flags;
 200};
 201
 202enum libbpf_map_type {
 203	LIBBPF_MAP_UNSPEC,
 204	LIBBPF_MAP_DATA,
 205	LIBBPF_MAP_BSS,
 206	LIBBPF_MAP_RODATA,
 207};
 208
 209static const char * const libbpf_type_to_btf_name[] = {
 210	[LIBBPF_MAP_DATA]	= ".data",
 211	[LIBBPF_MAP_BSS]	= ".bss",
 212	[LIBBPF_MAP_RODATA]	= ".rodata",
 213};
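
     /*
      * For reference, these sections originate from ordinary globals in
      * the BPF program's C code (an illustrative sketch, identifiers are
      * made up):
      *
      *	int pkt_cnt = 1;	-> ".data" (initialized, writable)
      *	static int scratch;	-> ".bss" (zero-initialized)
      *	const int limit = 42;	-> ".rodata" (read-only)
      *
      * Each section is wrapped into a single-entry array map by
      * bpf_object__init_internal_map() below.
      */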
 214
 215struct bpf_map {
 216	int fd;
 217	char *name;
 218	int sec_idx;
 219	size_t sec_offset;
 220	int map_ifindex;
 221	int inner_map_fd;
 222	struct bpf_map_def def;
 223	__u32 btf_key_type_id;
 224	__u32 btf_value_type_id;
 225	void *priv;
 226	bpf_map_clear_priv_t clear_priv;
 227	enum libbpf_map_type libbpf_type;
 228};
 229
 230struct bpf_secdata {
 231	void *rodata;
 232	void *data;
 233};
 234
 235static LIST_HEAD(bpf_objects_list);
 236
 237struct bpf_object {
 238	char name[BPF_OBJ_NAME_LEN];
 239	char license[64];
 240	__u32 kern_version;
 241
 242	struct bpf_program *programs;
 243	size_t nr_programs;
 244	struct bpf_map *maps;
 245	size_t nr_maps;
 246	size_t maps_cap;
 247	struct bpf_secdata sections;
 248
 249	bool loaded;
 250	bool has_pseudo_calls;
 251
 252	/*
 253	 * Information when doing elf related work. Only valid if fd
 254	 * is valid.
 255	 */
 256	struct {
 257		int fd;
 258		void *obj_buf;
 259		size_t obj_buf_sz;
 260		Elf *elf;
 261		GElf_Ehdr ehdr;
 262		Elf_Data *symbols;
 263		Elf_Data *data;
 264		Elf_Data *rodata;
 265		Elf_Data *bss;
 266		size_t strtabidx;
 267		struct {
 268			GElf_Shdr shdr;
 269			Elf_Data *data;
 270		} *reloc;
 271		int nr_reloc;
 272		int maps_shndx;
 273		int btf_maps_shndx;
 274		int text_shndx;
 275		int data_shndx;
 276		int rodata_shndx;
 277		int bss_shndx;
 278	} efile;
 279	/*
  280	 * All loaded bpf_object structures are linked in a list, which
  281	 * is hidden from the caller. bpf_objects__<func> handlers deal
  282	 * with all objects.
 283	 */
 284	struct list_head list;
 285
 286	struct btf *btf;
 287	struct btf_ext *btf_ext;
 288
 289	void *priv;
 290	bpf_object_clear_priv_t clear_priv;
 291
 292	struct bpf_capabilities caps;
 293
 294	char path[];
 295};
 296#define obj_elf_valid(o)	((o)->efile.elf)
 297
 298void bpf_program__unload(struct bpf_program *prog)
 299{
 300	int i;
 301
 302	if (!prog)
 303		return;
 304
 305	/*
 306	 * If the object is opened but the program was never loaded,
 307	 * it is possible that prog->instances.nr == -1.
 308	 */
 309	if (prog->instances.nr > 0) {
 310		for (i = 0; i < prog->instances.nr; i++)
 311			zclose(prog->instances.fds[i]);
 312	} else if (prog->instances.nr != -1) {
 313		pr_warning("Internal error: instances.nr is %d\n",
 314			   prog->instances.nr);
 315	}
 316
 317	prog->instances.nr = -1;
 318	zfree(&prog->instances.fds);
 319
 320	zfree(&prog->func_info);
 321	zfree(&prog->line_info);
 322}
 323
 324static void bpf_program__exit(struct bpf_program *prog)
 325{
 326	if (!prog)
 327		return;
 328
 329	if (prog->clear_priv)
 330		prog->clear_priv(prog, prog->priv);
 331
 332	prog->priv = NULL;
 333	prog->clear_priv = NULL;
 334
 335	bpf_program__unload(prog);
 336	zfree(&prog->name);
 337	zfree(&prog->section_name);
 338	zfree(&prog->pin_name);
 339	zfree(&prog->insns);
 340	zfree(&prog->reloc_desc);
 341
 342	prog->nr_reloc = 0;
 343	prog->insns_cnt = 0;
 344	prog->idx = -1;
 345}
 346
 347static char *__bpf_program__pin_name(struct bpf_program *prog)
 348{
 349	char *name, *p;
 350
 351	name = p = strdup(prog->section_name);
 352	while ((p = strchr(p, '/')))
 353		*p = '_';
 354
 355	return name;
 356}
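
     /* E.g., a program in SEC("cgroup/skb") gets pin_name "cgroup_skb". */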
 357
 358static int
 359bpf_program__init(void *data, size_t size, char *section_name, int idx,
 360		  struct bpf_program *prog)
 361{
 362	const size_t bpf_insn_sz = sizeof(struct bpf_insn);
 363
 364	if (size == 0 || size % bpf_insn_sz) {
 365		pr_warning("corrupted section '%s', size: %zu\n",
 366			   section_name, size);
 367		return -EINVAL;
 368	}
 369
 370	memset(prog, 0, sizeof(*prog));
 371
 372	prog->section_name = strdup(section_name);
 373	if (!prog->section_name) {
 374		pr_warning("failed to alloc name for prog under section(%d) %s\n",
 375			   idx, section_name);
 376		goto errout;
 377	}
 378
 379	prog->pin_name = __bpf_program__pin_name(prog);
 380	if (!prog->pin_name) {
 381		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
 382			   idx, section_name);
 383		goto errout;
 384	}
 385
 386	prog->insns = malloc(size);
 387	if (!prog->insns) {
 388		pr_warning("failed to alloc insns for prog under section %s\n",
 389			   section_name);
 390		goto errout;
 391	}
 392	prog->insns_cnt = size / bpf_insn_sz;
 393	memcpy(prog->insns, data, size);
 394	prog->idx = idx;
 395	prog->instances.fds = NULL;
 396	prog->instances.nr = -1;
 397	prog->type = BPF_PROG_TYPE_UNSPEC;
 398
 399	return 0;
 400errout:
 401	bpf_program__exit(prog);
 402	return -ENOMEM;
 403}
 404
 405static int
 406bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
 407			char *section_name, int idx)
 408{
 409	struct bpf_program prog, *progs;
 410	int nr_progs, err;
 411
 412	err = bpf_program__init(data, size, section_name, idx, &prog);
 413	if (err)
 414		return err;
 415
 416	prog.caps = &obj->caps;
 417	progs = obj->programs;
 418	nr_progs = obj->nr_programs;
 419
 420	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
 421	if (!progs) {
 422		/*
 423		 * In this case the original obj->programs
  424		 * is still valid, so no special treatment is needed for
 425		 * bpf_close_object().
 426		 */
 427		pr_warning("failed to alloc a new program under section '%s'\n",
 428			   section_name);
 429		bpf_program__exit(&prog);
 430		return -ENOMEM;
 431	}
 432
 433	pr_debug("found program %s\n", prog.section_name);
 434	obj->programs = progs;
 435	obj->nr_programs = nr_progs + 1;
 436	prog.obj = obj;
 437	progs[nr_progs] = prog;
 438	return 0;
 439}
 440
 441static int
 442bpf_object__init_prog_names(struct bpf_object *obj)
 443{
 444	Elf_Data *symbols = obj->efile.symbols;
 445	struct bpf_program *prog;
 446	size_t pi, si;
 447
 448	for (pi = 0; pi < obj->nr_programs; pi++) {
 449		const char *name = NULL;
 450
 451		prog = &obj->programs[pi];
 452
 453		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
 454		     si++) {
 455			GElf_Sym sym;
 456
 457			if (!gelf_getsym(symbols, si, &sym))
 458				continue;
 459			if (sym.st_shndx != prog->idx)
 460				continue;
 461			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
 462				continue;
 463
 464			name = elf_strptr(obj->efile.elf,
 465					  obj->efile.strtabidx,
 466					  sym.st_name);
 467			if (!name) {
 468				pr_warning("failed to get sym name string for prog %s\n",
 469					   prog->section_name);
 470				return -LIBBPF_ERRNO__LIBELF;
 471			}
 472		}
 473
 474		if (!name && prog->idx == obj->efile.text_shndx)
 475			name = ".text";
 476
 477		if (!name) {
 478			pr_warning("failed to find sym for prog %s\n",
 479				   prog->section_name);
 480			return -EINVAL;
 481		}
 482
 483		prog->name = strdup(name);
 484		if (!prog->name) {
 485			pr_warning("failed to allocate memory for prog sym %s\n",
 486				   name);
 487			return -ENOMEM;
 488		}
 489	}
 490
 491	return 0;
 492}
 493
 494static struct bpf_object *bpf_object__new(const char *path,
 495					  void *obj_buf,
 496					  size_t obj_buf_sz)
 497{
 498	struct bpf_object *obj;
 499	char *end;
 500
 501	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
 502	if (!obj) {
  503		pr_warning("failed to alloc memory for %s\n", path);
 504		return ERR_PTR(-ENOMEM);
 505	}
 506
 507	strcpy(obj->path, path);
  508	/* Use the GNU version of basename(), which doesn't modify its argument. */
 509	strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
 510	end = strchr(obj->name, '.');
 511	if (end)
 512		*end = 0;
 513
 514	obj->efile.fd = -1;
 515	/*
  516	 * The caller of this function should also call
  517	 * bpf_object__elf_finish() after data collection to return
  518	 * obj_buf to the user. Otherwise we would have to duplicate
  519	 * the buffer to keep the user from freeing it too early.
 520	 */
 521	obj->efile.obj_buf = obj_buf;
 522	obj->efile.obj_buf_sz = obj_buf_sz;
 523	obj->efile.maps_shndx = -1;
 524	obj->efile.btf_maps_shndx = -1;
 525	obj->efile.data_shndx = -1;
 526	obj->efile.rodata_shndx = -1;
 527	obj->efile.bss_shndx = -1;
 528
 529	obj->loaded = false;
 530
 531	INIT_LIST_HEAD(&obj->list);
 532	list_add(&obj->list, &bpf_objects_list);
 533	return obj;
 534}
 535
 536static void bpf_object__elf_finish(struct bpf_object *obj)
 537{
 538	if (!obj_elf_valid(obj))
 539		return;
 540
 541	if (obj->efile.elf) {
 542		elf_end(obj->efile.elf);
 543		obj->efile.elf = NULL;
 544	}
 545	obj->efile.symbols = NULL;
 546	obj->efile.data = NULL;
 547	obj->efile.rodata = NULL;
 548	obj->efile.bss = NULL;
 549
 550	zfree(&obj->efile.reloc);
 551	obj->efile.nr_reloc = 0;
 552	zclose(obj->efile.fd);
 553	obj->efile.obj_buf = NULL;
 554	obj->efile.obj_buf_sz = 0;
 555}
 556
 557static int bpf_object__elf_init(struct bpf_object *obj)
 558{
 559	int err = 0;
 560	GElf_Ehdr *ep;
 561
 562	if (obj_elf_valid(obj)) {
 563		pr_warning("elf init: internal error\n");
 564		return -LIBBPF_ERRNO__LIBELF;
 565	}
 566
 567	if (obj->efile.obj_buf_sz > 0) {
 568		/*
 569		 * obj_buf should have been validated by
 570		 * bpf_object__open_buffer().
 571		 */
 572		obj->efile.elf = elf_memory(obj->efile.obj_buf,
 573					    obj->efile.obj_buf_sz);
 574	} else {
 575		obj->efile.fd = open(obj->path, O_RDONLY);
 576		if (obj->efile.fd < 0) {
 577			char errmsg[STRERR_BUFSIZE], *cp;
 578
 579			err = -errno;
 580			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
 581			pr_warning("failed to open %s: %s\n", obj->path, cp);
 582			return err;
 583		}
 584
 585		obj->efile.elf = elf_begin(obj->efile.fd,
 586					   LIBBPF_ELF_C_READ_MMAP, NULL);
 587	}
 588
 589	if (!obj->efile.elf) {
 590		pr_warning("failed to open %s as ELF file\n", obj->path);
 591		err = -LIBBPF_ERRNO__LIBELF;
 592		goto errout;
 593	}
 594
 595	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
 596		pr_warning("failed to get EHDR from %s\n", obj->path);
 597		err = -LIBBPF_ERRNO__FORMAT;
 598		goto errout;
 599	}
 600	ep = &obj->efile.ehdr;
 601
  602	/* Old LLVM versions set e_machine to EM_NONE */
 603	if (ep->e_type != ET_REL ||
 604	    (ep->e_machine && ep->e_machine != EM_BPF)) {
 605		pr_warning("%s is not an eBPF object file\n", obj->path);
 606		err = -LIBBPF_ERRNO__FORMAT;
 607		goto errout;
 608	}
 609
 610	return 0;
 611errout:
 612	bpf_object__elf_finish(obj);
 613	return err;
 614}
 615
 616static int bpf_object__check_endianness(struct bpf_object *obj)
 617{
 618#if __BYTE_ORDER == __LITTLE_ENDIAN
 619	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
 620		return 0;
 621#elif __BYTE_ORDER == __BIG_ENDIAN
 622	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
 623		return 0;
 624#else
  625# error "Unrecognized __BYTE_ORDER"
 626#endif
 627	pr_warning("endianness mismatch.\n");
 628	return -LIBBPF_ERRNO__ENDIAN;
 629}
 630
 631static int
 632bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
 633{
 634	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
 635	pr_debug("license of %s is %s\n", obj->path, obj->license);
 636	return 0;
 637}
 638
 639static int
 640bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
 641{
 642	__u32 kver;
 643
 644	if (size != sizeof(kver)) {
 645		pr_warning("invalid kver section in %s\n", obj->path);
 646		return -LIBBPF_ERRNO__FORMAT;
 647	}
 648	memcpy(&kver, data, sizeof(kver));
 649	obj->kern_version = kver;
 650	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
 651	return 0;
 652}
 653
 654static int compare_bpf_map(const void *_a, const void *_b)
 655{
 656	const struct bpf_map *a = _a;
 657	const struct bpf_map *b = _b;
 658
 659	if (a->sec_idx != b->sec_idx)
 660		return a->sec_idx - b->sec_idx;
 661	return a->sec_offset - b->sec_offset;
 662}
 663
 664static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
 665{
 666	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
 667	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
 668		return true;
 669	return false;
 670}
 671
 672static int bpf_object_search_section_size(const struct bpf_object *obj,
 673					  const char *name, size_t *d_size)
 674{
 675	const GElf_Ehdr *ep = &obj->efile.ehdr;
 676	Elf *elf = obj->efile.elf;
 677	Elf_Scn *scn = NULL;
 678	int idx = 0;
 679
 680	while ((scn = elf_nextscn(elf, scn)) != NULL) {
 681		const char *sec_name;
 682		Elf_Data *data;
 683		GElf_Shdr sh;
 684
 685		idx++;
 686		if (gelf_getshdr(scn, &sh) != &sh) {
 687			pr_warning("failed to get section(%d) header from %s\n",
 688				   idx, obj->path);
 689			return -EIO;
 690		}
 691
 692		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
 693		if (!sec_name) {
 694			pr_warning("failed to get section(%d) name from %s\n",
 695				   idx, obj->path);
 696			return -EIO;
 697		}
 698
 699		if (strcmp(name, sec_name))
 700			continue;
 701
 702		data = elf_getdata(scn, 0);
 703		if (!data) {
 704			pr_warning("failed to get section(%d) data from %s(%s)\n",
 705				   idx, name, obj->path);
 706			return -EIO;
 707		}
 708
 709		*d_size = data->d_size;
 710		return 0;
 711	}
 712
 713	return -ENOENT;
 714}
 715
 716int bpf_object__section_size(const struct bpf_object *obj, const char *name,
 717			     __u32 *size)
 718{
 719	int ret = -ENOENT;
 720	size_t d_size;
 721
 722	*size = 0;
 723	if (!name) {
 724		return -EINVAL;
 725	} else if (!strcmp(name, ".data")) {
 726		if (obj->efile.data)
 727			*size = obj->efile.data->d_size;
 728	} else if (!strcmp(name, ".bss")) {
 729		if (obj->efile.bss)
 730			*size = obj->efile.bss->d_size;
 731	} else if (!strcmp(name, ".rodata")) {
 732		if (obj->efile.rodata)
 733			*size = obj->efile.rodata->d_size;
 734	} else {
 735		ret = bpf_object_search_section_size(obj, name, &d_size);
 736		if (!ret)
 737			*size = d_size;
 738	}
 739
 740	return *size ? 0 : ret;
 741}
 742
 743int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
 744				__u32 *off)
 745{
 746	Elf_Data *symbols = obj->efile.symbols;
 747	const char *sname;
 748	size_t si;
 749
 750	if (!name || !off)
 751		return -EINVAL;
 752
 753	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
 754		GElf_Sym sym;
 755
 756		if (!gelf_getsym(symbols, si, &sym))
 757			continue;
 758		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
 759		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
 760			continue;
 761
 762		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
 763				   sym.st_name);
 764		if (!sname) {
 765			pr_warning("failed to get sym name string for var %s\n",
 766				   name);
 767			return -EIO;
 768		}
 769		if (strcmp(name, sname) == 0) {
 770			*off = sym.st_value;
 771			return 0;
 772		}
 773	}
 774
 775	return -ENOENT;
 776}
 777
 778static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
 779{
 780	struct bpf_map *new_maps;
 781	size_t new_cap;
 782	int i;
 783
 784	if (obj->nr_maps < obj->maps_cap)
 785		return &obj->maps[obj->nr_maps++];
 786
 787	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
 788	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
 789	if (!new_maps) {
  790		pr_warning("failed to alloc maps for object\n");
 791		return ERR_PTR(-ENOMEM);
 792	}
 793
 794	obj->maps_cap = new_cap;
 795	obj->maps = new_maps;
 796
 797	/* zero out new maps */
 798	memset(obj->maps + obj->nr_maps, 0,
 799	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
 800	/*
  801	 * fill all fds with -1 so we won't close an incorrect fd (fd=0 is
  802	 * stdin) on failure (zclose won't close a negative fd).
 803	 */
 804	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
 805		obj->maps[i].fd = -1;
 806		obj->maps[i].inner_map_fd = -1;
 807	}
 808
 809	return &obj->maps[obj->nr_maps++];
 810}
 811
 812static int
 813bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
 814			      int sec_idx, Elf_Data *data, void **data_buff)
 815{
 816	char map_name[BPF_OBJ_NAME_LEN];
 817	struct bpf_map_def *def;
 818	struct bpf_map *map;
 819
 820	map = bpf_object__add_map(obj);
 821	if (IS_ERR(map))
 822		return PTR_ERR(map);
 823
 824	map->libbpf_type = type;
 825	map->sec_idx = sec_idx;
 826	map->sec_offset = 0;
 827	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
 828		 libbpf_type_to_btf_name[type]);
 829	map->name = strdup(map_name);
 830	if (!map->name) {
 831		pr_warning("failed to alloc map name\n");
 832		return -ENOMEM;
 833	}
 834	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
 835		 map_name, map->sec_idx, map->sec_offset);
 836
 837	def = &map->def;
 838	def->type = BPF_MAP_TYPE_ARRAY;
 839	def->key_size = sizeof(int);
 840	def->value_size = data->d_size;
 841	def->max_entries = 1;
 842	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
 843	if (data_buff) {
 844		*data_buff = malloc(data->d_size);
 845		if (!*data_buff) {
 846			zfree(&map->name);
 847			pr_warning("failed to alloc map content buffer\n");
 848			return -ENOMEM;
 849		}
 850		memcpy(*data_buff, data->d_buf, data->d_size);
 851	}
 852
 853	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
 854	return 0;
 855}
 856
 857static int bpf_object__init_global_data_maps(struct bpf_object *obj)
 858{
 859	int err;
 860
 861	if (!obj->caps.global_data)
 862		return 0;
 863	/*
 864	 * Populate obj->maps with libbpf internal maps.
 865	 */
 866	if (obj->efile.data_shndx >= 0) {
 867		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
 868						    obj->efile.data_shndx,
 869						    obj->efile.data,
 870						    &obj->sections.data);
 871		if (err)
 872			return err;
 873	}
 874	if (obj->efile.rodata_shndx >= 0) {
 875		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
 876						    obj->efile.rodata_shndx,
 877						    obj->efile.rodata,
 878						    &obj->sections.rodata);
 879		if (err)
 880			return err;
 881	}
 882	if (obj->efile.bss_shndx >= 0) {
 883		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
 884						    obj->efile.bss_shndx,
 885						    obj->efile.bss, NULL);
 886		if (err)
 887			return err;
 888	}
 889	return 0;
 890}
 891
 892static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
 893{
 894	Elf_Data *symbols = obj->efile.symbols;
 895	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
 896	Elf_Data *data = NULL;
 897	Elf_Scn *scn;
 898
 899	if (obj->efile.maps_shndx < 0)
 900		return 0;
 901
 902	if (!symbols)
 903		return -EINVAL;
 904
 905	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
 906	if (scn)
 907		data = elf_getdata(scn, NULL);
 908	if (!scn || !data) {
 909		pr_warning("failed to get Elf_Data from map section %d\n",
 910			   obj->efile.maps_shndx);
 911		return -EINVAL;
 912	}
 913
 914	/*
 915	 * Count number of maps. Each map has a name.
 916	 * Array of maps is not supported: only the first element is
 917	 * considered.
 918	 *
 919	 * TODO: Detect array of map and report error.
 920	 */
 921	nr_syms = symbols->d_size / sizeof(GElf_Sym);
 922	for (i = 0; i < nr_syms; i++) {
 923		GElf_Sym sym;
 924
 925		if (!gelf_getsym(symbols, i, &sym))
 926			continue;
 927		if (sym.st_shndx != obj->efile.maps_shndx)
 928			continue;
 929		nr_maps++;
 930	}
 931	/* Assume equally sized map definitions */
 932	pr_debug("maps in %s: %d maps in %zd bytes\n",
 933		 obj->path, nr_maps, data->d_size);
 934
  935	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
  936		pr_warning("unable to determine map definition size "
  937			   "section %s, %d maps in %zd bytes\n",
  938			   obj->path, nr_maps, data->d_size);
  939		return -EINVAL;
  940	}
  941	map_def_sz = data->d_size / nr_maps;
 942
 943	/* Fill obj->maps using data in "maps" section.  */
 944	for (i = 0; i < nr_syms; i++) {
 945		GElf_Sym sym;
 946		const char *map_name;
 947		struct bpf_map_def *def;
 948		struct bpf_map *map;
 949
 950		if (!gelf_getsym(symbols, i, &sym))
 951			continue;
 952		if (sym.st_shndx != obj->efile.maps_shndx)
 953			continue;
 954
 955		map = bpf_object__add_map(obj);
 956		if (IS_ERR(map))
 957			return PTR_ERR(map);
 958
 959		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
 960				      sym.st_name);
 961		if (!map_name) {
 962			pr_warning("failed to get map #%d name sym string for obj %s\n",
 963				   i, obj->path);
 964			return -LIBBPF_ERRNO__FORMAT;
 965		}
 966
 967		map->libbpf_type = LIBBPF_MAP_UNSPEC;
 968		map->sec_idx = sym.st_shndx;
 969		map->sec_offset = sym.st_value;
 970		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
 971			 map_name, map->sec_idx, map->sec_offset);
 972		if (sym.st_value + map_def_sz > data->d_size) {
 973			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
 974				   obj->path, map_name);
 975			return -EINVAL;
 976		}
 977
 978		map->name = strdup(map_name);
 979		if (!map->name) {
 980			pr_warning("failed to alloc map name\n");
 981			return -ENOMEM;
 982		}
 983		pr_debug("map %d is \"%s\"\n", i, map->name);
 984		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
 985		/*
 986		 * If the definition of the map in the object file fits in
 987		 * bpf_map_def, copy it.  Any extra fields in our version
  988		 * of bpf_map_def will default to zero, because new maps
  989		 * are zero-filled in bpf_object__add_map().
 990		 */
 991		if (map_def_sz <= sizeof(struct bpf_map_def)) {
 992			memcpy(&map->def, def, map_def_sz);
 993		} else {
 994			/*
 995			 * Here the map structure being read is bigger than what
  996			 * we expect, truncate if the excess bytes are all zero.
 997			 * If they are not zero, reject this map as
 998			 * incompatible.
 999			 */
1000			char *b;
1001			for (b = ((char *)def) + sizeof(struct bpf_map_def);
1002			     b < ((char *)def) + map_def_sz; b++) {
1003				if (*b != 0) {
1004					pr_warning("maps section in %s: \"%s\" "
1005						   "has unrecognized, non-zero "
1006						   "options\n",
1007						   obj->path, map_name);
1008					if (strict)
1009						return -EINVAL;
1010				}
1011			}
1012			memcpy(&map->def, def, sizeof(struct bpf_map_def));
1013		}
1014	}
1015	return 0;
1016}
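
     /*
      * A sketch of the legacy map definition parsed above, as it would
      * appear in a BPF program (illustrative, names are made up):
      *
      *	struct bpf_map_def SEC("maps") my_map = {
      *		.type		= BPF_MAP_TYPE_HASH,
      *		.key_size	= sizeof(__u32),
      *		.value_size	= sizeof(__u64),
      *		.max_entries	= 1024,
      *	};
      *
      * Each such global contributes one symbol in the "maps" section,
      * which is how nr_maps is counted above.
      */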
1017
1018static const struct btf_type *
1019skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1020{
1021	const struct btf_type *t = btf__type_by_id(btf, id);
1022
1023	if (res_id)
1024		*res_id = id;
1025
1026	while (btf_is_mod(t) || btf_is_typedef(t)) {
1027		if (res_id)
1028			*res_id = t->type;
1029		t = btf__type_by_id(btf, t->type);
1030	}
1031
1032	return t;
1033}
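
     /*
      * E.g., given "typedef const struct foo bar;", resolving a reference
      * of type "bar" walks TYPEDEF -> CONST and returns the STRUCT type
      * for "foo" (its id is reported through res_id).
      */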
1034
1035/*
1036 * Fetch integer attribute of BTF map definition. Such attributes are
1037 * represented using a pointer to an array, in which dimensionality of array
1038 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
1039 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
1040 * type definition, while using only sizeof(void *) space in ELF data section.
1041 */
1042static bool get_map_field_int(const char *map_name, const struct btf *btf,
1043			      const struct btf_type *def,
1044			      const struct btf_member *m, __u32 *res) {
1045	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
1046	const char *name = btf__name_by_offset(btf, m->name_off);
1047	const struct btf_array *arr_info;
1048	const struct btf_type *arr_t;
1049
1050	if (!btf_is_ptr(t)) {
1051		pr_warning("map '%s': attr '%s': expected PTR, got %u.\n",
1052			   map_name, name, btf_kind(t));
1053		return false;
1054	}
1055
1056	arr_t = btf__type_by_id(btf, t->type);
1057	if (!arr_t) {
1058		pr_warning("map '%s': attr '%s': type [%u] not found.\n",
1059			   map_name, name, t->type);
1060		return false;
1061	}
1062	if (!btf_is_array(arr_t)) {
1063		pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n",
1064			   map_name, name, btf_kind(arr_t));
1065		return false;
1066	}
1067	arr_info = btf_array(arr_t);
1068	*res = arr_info->nelems;
1069	return true;
1070}
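
     /*
      * A sketch of a BTF-defined map using this encoding (illustrative,
      * "my_map" and "my_value" are made up):
      *
      *	struct {
      *		int (*type)[BPF_MAP_TYPE_HASH];
      *		int (*max_entries)[4096];
      *		__u32 *key;
      *		struct my_value *value;
      *	} my_map SEC(".maps");
      *
      * "type" and "max_entries" are decoded by get_map_field_int() above;
      * "key" and "value" sizes are instead derived from the pointed-to
      * BTF types in bpf_object__init_user_btf_map() below.
      */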
1071
1072static int bpf_object__init_user_btf_map(struct bpf_object *obj,
1073					 const struct btf_type *sec,
1074					 int var_idx, int sec_idx,
1075					 const Elf_Data *data, bool strict)
1076{
1077	const struct btf_type *var, *def, *t;
1078	const struct btf_var_secinfo *vi;
1079	const struct btf_var *var_extra;
1080	const struct btf_member *m;
1081	const char *map_name;
1082	struct bpf_map *map;
1083	int vlen, i;
1084
1085	vi = btf_var_secinfos(sec) + var_idx;
1086	var = btf__type_by_id(obj->btf, vi->type);
1087	var_extra = btf_var(var);
1088	map_name = btf__name_by_offset(obj->btf, var->name_off);
1089	vlen = btf_vlen(var);
1090
1091	if (map_name == NULL || map_name[0] == '\0') {
1092		pr_warning("map #%d: empty name.\n", var_idx);
1093		return -EINVAL;
1094	}
1095	if ((__u64)vi->offset + vi->size > data->d_size) {
1096		pr_warning("map '%s' BTF data is corrupted.\n", map_name);
1097		return -EINVAL;
1098	}
1099	if (!btf_is_var(var)) {
1100		pr_warning("map '%s': unexpected var kind %u.\n",
1101			   map_name, btf_kind(var));
1102		return -EINVAL;
1103	}
1104	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
1105	    var_extra->linkage != BTF_VAR_STATIC) {
1106		pr_warning("map '%s': unsupported var linkage %u.\n",
1107			   map_name, var_extra->linkage);
1108		return -EOPNOTSUPP;
1109	}
1110
1111	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
1112	if (!btf_is_struct(def)) {
1113		pr_warning("map '%s': unexpected def kind %u.\n",
 1114			   map_name, btf_kind(def));
1115		return -EINVAL;
1116	}
1117	if (def->size > vi->size) {
1118		pr_warning("map '%s': invalid def size.\n", map_name);
1119		return -EINVAL;
1120	}
1121
1122	map = bpf_object__add_map(obj);
1123	if (IS_ERR(map))
1124		return PTR_ERR(map);
1125	map->name = strdup(map_name);
1126	if (!map->name) {
1127		pr_warning("map '%s': failed to alloc map name.\n", map_name);
1128		return -ENOMEM;
1129	}
1130	map->libbpf_type = LIBBPF_MAP_UNSPEC;
1131	map->def.type = BPF_MAP_TYPE_UNSPEC;
1132	map->sec_idx = sec_idx;
1133	map->sec_offset = vi->offset;
1134	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
1135		 map_name, map->sec_idx, map->sec_offset);
1136
1137	vlen = btf_vlen(def);
1138	m = btf_members(def);
1139	for (i = 0; i < vlen; i++, m++) {
1140		const char *name = btf__name_by_offset(obj->btf, m->name_off);
1141
1142		if (!name) {
1143			pr_warning("map '%s': invalid field #%d.\n",
1144				   map_name, i);
1145			return -EINVAL;
1146		}
1147		if (strcmp(name, "type") == 0) {
1148			if (!get_map_field_int(map_name, obj->btf, def, m,
1149					       &map->def.type))
1150				return -EINVAL;
1151			pr_debug("map '%s': found type = %u.\n",
1152				 map_name, map->def.type);
1153		} else if (strcmp(name, "max_entries") == 0) {
1154			if (!get_map_field_int(map_name, obj->btf, def, m,
1155					       &map->def.max_entries))
1156				return -EINVAL;
1157			pr_debug("map '%s': found max_entries = %u.\n",
1158				 map_name, map->def.max_entries);
1159		} else if (strcmp(name, "map_flags") == 0) {
1160			if (!get_map_field_int(map_name, obj->btf, def, m,
1161					       &map->def.map_flags))
1162				return -EINVAL;
1163			pr_debug("map '%s': found map_flags = %u.\n",
1164				 map_name, map->def.map_flags);
1165		} else if (strcmp(name, "key_size") == 0) {
1166			__u32 sz;
1167
1168			if (!get_map_field_int(map_name, obj->btf, def, m,
1169					       &sz))
1170				return -EINVAL;
1171			pr_debug("map '%s': found key_size = %u.\n",
1172				 map_name, sz);
1173			if (map->def.key_size && map->def.key_size != sz) {
1174				pr_warning("map '%s': conflicting key size %u != %u.\n",
1175					   map_name, map->def.key_size, sz);
1176				return -EINVAL;
1177			}
1178			map->def.key_size = sz;
1179		} else if (strcmp(name, "key") == 0) {
1180			__s64 sz;
1181
1182			t = btf__type_by_id(obj->btf, m->type);
1183			if (!t) {
1184				pr_warning("map '%s': key type [%d] not found.\n",
1185					   map_name, m->type);
1186				return -EINVAL;
1187			}
1188			if (!btf_is_ptr(t)) {
1189				pr_warning("map '%s': key spec is not PTR: %u.\n",
1190					   map_name, btf_kind(t));
1191				return -EINVAL;
1192			}
1193			sz = btf__resolve_size(obj->btf, t->type);
1194			if (sz < 0) {
1195				pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n",
1196					   map_name, t->type, sz);
1197				return sz;
1198			}
1199			pr_debug("map '%s': found key [%u], sz = %lld.\n",
1200				 map_name, t->type, sz);
1201			if (map->def.key_size && map->def.key_size != sz) {
1202				pr_warning("map '%s': conflicting key size %u != %lld.\n",
1203					   map_name, map->def.key_size, sz);
1204				return -EINVAL;
1205			}
1206			map->def.key_size = sz;
1207			map->btf_key_type_id = t->type;
1208		} else if (strcmp(name, "value_size") == 0) {
1209			__u32 sz;
1210
1211			if (!get_map_field_int(map_name, obj->btf, def, m,
1212					       &sz))
1213				return -EINVAL;
1214			pr_debug("map '%s': found value_size = %u.\n",
1215				 map_name, sz);
1216			if (map->def.value_size && map->def.value_size != sz) {
1217				pr_warning("map '%s': conflicting value size %u != %u.\n",
1218					   map_name, map->def.value_size, sz);
1219				return -EINVAL;
1220			}
1221			map->def.value_size = sz;
1222		} else if (strcmp(name, "value") == 0) {
1223			__s64 sz;
1224
1225			t = btf__type_by_id(obj->btf, m->type);
1226			if (!t) {
1227				pr_warning("map '%s': value type [%d] not found.\n",
1228					   map_name, m->type);
1229				return -EINVAL;
1230			}
1231			if (!btf_is_ptr(t)) {
1232				pr_warning("map '%s': value spec is not PTR: %u.\n",
1233					   map_name, btf_kind(t));
1234				return -EINVAL;
1235			}
1236			sz = btf__resolve_size(obj->btf, t->type);
1237			if (sz < 0) {
1238				pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n",
1239					   map_name, t->type, sz);
1240				return sz;
1241			}
1242			pr_debug("map '%s': found value [%u], sz = %lld.\n",
1243				 map_name, t->type, sz);
1244			if (map->def.value_size && map->def.value_size != sz) {
1245				pr_warning("map '%s': conflicting value size %u != %lld.\n",
1246					   map_name, map->def.value_size, sz);
1247				return -EINVAL;
1248			}
1249			map->def.value_size = sz;
1250			map->btf_value_type_id = t->type;
1251		} else {
1252			if (strict) {
1253				pr_warning("map '%s': unknown field '%s'.\n",
1254					   map_name, name);
1255				return -ENOTSUP;
1256			}
1257			pr_debug("map '%s': ignoring unknown field '%s'.\n",
1258				 map_name, name);
1259		}
1260	}
1261
1262	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
1263		pr_warning("map '%s': map type isn't specified.\n", map_name);
1264		return -EINVAL;
1265	}
1266
1267	return 0;
1268}
1269
1270static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
1271{
1272	const struct btf_type *sec = NULL;
1273	int nr_types, i, vlen, err;
1274	const struct btf_type *t;
1275	const char *name;
1276	Elf_Data *data;
1277	Elf_Scn *scn;
1278
1279	if (obj->efile.btf_maps_shndx < 0)
1280		return 0;
1281
1282	scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
1283	if (scn)
1284		data = elf_getdata(scn, NULL);
1285	if (!scn || !data) {
1286		pr_warning("failed to get Elf_Data from map section %d (%s)\n",
 1287			   obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
1288		return -EINVAL;
1289	}
1290
1291	nr_types = btf__get_nr_types(obj->btf);
1292	for (i = 1; i <= nr_types; i++) {
1293		t = btf__type_by_id(obj->btf, i);
1294		if (!btf_is_datasec(t))
1295			continue;
1296		name = btf__name_by_offset(obj->btf, t->name_off);
1297		if (strcmp(name, MAPS_ELF_SEC) == 0) {
1298			sec = t;
1299			break;
1300		}
1301	}
1302
1303	if (!sec) {
1304		pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
1305		return -ENOENT;
1306	}
1307
1308	vlen = btf_vlen(sec);
1309	for (i = 0; i < vlen; i++) {
1310		err = bpf_object__init_user_btf_map(obj, sec, i,
1311						    obj->efile.btf_maps_shndx,
1312						    data, strict);
1313		if (err)
1314			return err;
1315	}
1316
1317	return 0;
1318}
1319
1320static int bpf_object__init_maps(struct bpf_object *obj, int flags)
1321{
1322	bool strict = !(flags & MAPS_RELAX_COMPAT);
1323	int err;
1324
1325	err = bpf_object__init_user_maps(obj, strict);
1326	if (err)
1327		return err;
1328
1329	err = bpf_object__init_user_btf_maps(obj, strict);
1330	if (err)
1331		return err;
1332
1333	err = bpf_object__init_global_data_maps(obj);
1334	if (err)
1335		return err;
1336
1337	if (obj->nr_maps) {
1338		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
1339		      compare_bpf_map);
1340	}
1341	return 0;
1342}
1343
1344static bool section_have_execinstr(struct bpf_object *obj, int idx)
1345{
1346	Elf_Scn *scn;
1347	GElf_Shdr sh;
1348
1349	scn = elf_getscn(obj->efile.elf, idx);
1350	if (!scn)
1351		return false;
1352
1353	if (gelf_getshdr(scn, &sh) != &sh)
1354		return false;
1355
1356	if (sh.sh_flags & SHF_EXECINSTR)
1357		return true;
1358
1359	return false;
1360}
1361
1362static void bpf_object__sanitize_btf(struct bpf_object *obj)
1363{
1364	bool has_datasec = obj->caps.btf_datasec;
1365	bool has_func = obj->caps.btf_func;
1366	struct btf *btf = obj->btf;
1367	struct btf_type *t;
1368	int i, j, vlen;
1369
1370	if (!obj->btf || (has_func && has_datasec))
1371		return;
1372
1373	for (i = 1; i <= btf__get_nr_types(btf); i++) {
1374		t = (struct btf_type *)btf__type_by_id(btf, i);
1375
1376		if (!has_datasec && btf_is_var(t)) {
1377			/* replace VAR with INT */
1378			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
1379			/*
1380			 * using size = 1 is the safest choice, 4 will be too
1381			 * big and cause kernel BTF validation failure if
1382			 * original variable took less than 4 bytes
1383			 */
1384			t->size = 1;
1385			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
1386		} else if (!has_datasec && btf_is_datasec(t)) {
1387			/* replace DATASEC with STRUCT */
1388			const struct btf_var_secinfo *v = btf_var_secinfos(t);
1389			struct btf_member *m = btf_members(t);
1390			struct btf_type *vt;
1391			char *name;
1392
1393			name = (char *)btf__name_by_offset(btf, t->name_off);
1394			while (*name) {
1395				if (*name == '.')
1396					*name = '_';
1397				name++;
1398			}
1399
1400			vlen = btf_vlen(t);
1401			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
1402			for (j = 0; j < vlen; j++, v++, m++) {
1403				/* order of field assignments is important */
1404				m->offset = v->offset * 8;
1405				m->type = v->type;
1406				/* preserve variable name as member name */
1407				vt = (void *)btf__type_by_id(btf, v->type);
1408				m->name_off = vt->name_off;
1409			}
1410		} else if (!has_func && btf_is_func_proto(t)) {
1411			/* replace FUNC_PROTO with ENUM */
1412			vlen = btf_vlen(t);
1413			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
1414			t->size = sizeof(__u32); /* kernel enforced */
1415		} else if (!has_func && btf_is_func(t)) {
1416			/* replace FUNC with TYPEDEF */
1417			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
1418		}
1419	}
1420}
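
     /*
      * A sketch of the downgrade performed above for kernels lacking
      * BTF_KIND_VAR/DATASEC or BTF_KIND_FUNC support (illustrative):
      *
      *	DATASEC ".data" { VAR "cnt" }  ->  STRUCT "_data" { member "cnt" }
      *	VAR                            ->  1-byte INT
      *	FUNC_PROTO / FUNC              ->  ENUM / TYPEDEF
      */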
1421
1422static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
1423{
1424	if (!obj->btf_ext)
1425		return;
1426
1427	if (!obj->caps.btf_func) {
1428		btf_ext__free(obj->btf_ext);
1429		obj->btf_ext = NULL;
1430	}
1431}
1432
1433static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
1434{
1435	return obj->efile.btf_maps_shndx >= 0;
1436}
1437
1438static int bpf_object__init_btf(struct bpf_object *obj,
1439				Elf_Data *btf_data,
1440				Elf_Data *btf_ext_data)
1441{
1442	bool btf_required = bpf_object__is_btf_mandatory(obj);
1443	int err = 0;
1444
1445	if (btf_data) {
1446		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
1447		if (IS_ERR(obj->btf)) {
 1448			pr_warning("Error loading ELF section %s: %ld.\n",
 1449				   BTF_ELF_SEC, PTR_ERR(obj->btf));
1450			goto out;
1451		}
1452		err = btf__finalize_data(obj, obj->btf);
1453		if (err) {
1454			pr_warning("Error finalizing %s: %d.\n",
1455				   BTF_ELF_SEC, err);
1456			goto out;
1457		}
1458	}
1459	if (btf_ext_data) {
1460		if (!obj->btf) {
 1461			pr_debug("Ignoring ELF section %s: the ELF section it depends on (%s) was not found.\n",
1462				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
1463			goto out;
1464		}
1465		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
1466					    btf_ext_data->d_size);
1467		if (IS_ERR(obj->btf_ext)) {
 1468			pr_warning("Error loading ELF section %s: %ld. Ignoring and continuing.\n",
1469				   BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
1470			obj->btf_ext = NULL;
1471			goto out;
1472		}
1473	}
1474out:
1475	if (err || IS_ERR(obj->btf)) {
1476		if (btf_required)
1477			err = err ? : PTR_ERR(obj->btf);
1478		else
1479			err = 0;
1480		if (!IS_ERR_OR_NULL(obj->btf))
1481			btf__free(obj->btf);
1482		obj->btf = NULL;
1483	}
1484	if (btf_required && !obj->btf) {
1485		pr_warning("BTF is required, but is missing or corrupted.\n");
1486		return err == 0 ? -ENOENT : err;
1487	}
1488	return 0;
1489}
1490
1491static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
1492{
1493	int err = 0;
1494
1495	if (!obj->btf)
1496		return 0;
1497
1498	bpf_object__sanitize_btf(obj);
1499	bpf_object__sanitize_btf_ext(obj);
1500
1501	err = btf__load(obj->btf);
1502	if (err) {
1503		pr_warning("Error loading %s into kernel: %d.\n",
1504			   BTF_ELF_SEC, err);
1505		btf__free(obj->btf);
1506		obj->btf = NULL;
1507		/* btf_ext can't exist without btf, so free it as well */
1508		if (obj->btf_ext) {
1509			btf_ext__free(obj->btf_ext);
1510			obj->btf_ext = NULL;
1511		}
1512
1513		if (bpf_object__is_btf_mandatory(obj))
1514			return err;
1515	}
1516	return 0;
1517}
1518
1519static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
1520{
1521	Elf *elf = obj->efile.elf;
1522	GElf_Ehdr *ep = &obj->efile.ehdr;
1523	Elf_Data *btf_ext_data = NULL;
1524	Elf_Data *btf_data = NULL;
1525	Elf_Scn *scn = NULL;
1526	int idx = 0, err = 0;
1527
1528	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
1529	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
1530		pr_warning("failed to get e_shstrndx from %s\n", obj->path);
1531		return -LIBBPF_ERRNO__FORMAT;
1532	}
1533
1534	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1535		char *name;
1536		GElf_Shdr sh;
1537		Elf_Data *data;
1538
1539		idx++;
1540		if (gelf_getshdr(scn, &sh) != &sh) {
1541			pr_warning("failed to get section(%d) header from %s\n",
1542				   idx, obj->path);
1543			return -LIBBPF_ERRNO__FORMAT;
1544		}
1545
1546		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
1547		if (!name) {
1548			pr_warning("failed to get section(%d) name from %s\n",
1549				   idx, obj->path);
1550			return -LIBBPF_ERRNO__FORMAT;
1551		}
1552
1553		data = elf_getdata(scn, 0);
1554		if (!data) {
1555			pr_warning("failed to get section(%d) data from %s(%s)\n",
1556				   idx, name, obj->path);
1557			return -LIBBPF_ERRNO__FORMAT;
1558		}
1559		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
1560			 idx, name, (unsigned long)data->d_size,
1561			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
1562			 (int)sh.sh_type);
1563
1564		if (strcmp(name, "license") == 0) {
1565			err = bpf_object__init_license(obj,
1566						       data->d_buf,
1567						       data->d_size);
1568			if (err)
1569				return err;
1570		} else if (strcmp(name, "version") == 0) {
1571			err = bpf_object__init_kversion(obj,
1572							data->d_buf,
1573							data->d_size);
1574			if (err)
1575				return err;
1576		} else if (strcmp(name, "maps") == 0) {
1577			obj->efile.maps_shndx = idx;
1578		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
1579			obj->efile.btf_maps_shndx = idx;
1580		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
1581			btf_data = data;
1582		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1583			btf_ext_data = data;
1584		} else if (sh.sh_type == SHT_SYMTAB) {
1585			if (obj->efile.symbols) {
1586				pr_warning("bpf: multiple SYMTAB in %s\n",
1587					   obj->path);
1588				return -LIBBPF_ERRNO__FORMAT;
1589			}
1590			obj->efile.symbols = data;
1591			obj->efile.strtabidx = sh.sh_link;
1592		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
1593			if (sh.sh_flags & SHF_EXECINSTR) {
1594				if (strcmp(name, ".text") == 0)
1595					obj->efile.text_shndx = idx;
1596				err = bpf_object__add_program(obj, data->d_buf,
1597							      data->d_size, name, idx);
1598				if (err) {
1599					char errmsg[STRERR_BUFSIZE];
1600					char *cp = libbpf_strerror_r(-err, errmsg,
1601								     sizeof(errmsg));
1602
 1603					pr_warning("failed to alloc program %s (%s): %s\n",
1604						   name, obj->path, cp);
1605					return err;
1606				}
1607			} else if (strcmp(name, ".data") == 0) {
1608				obj->efile.data = data;
1609				obj->efile.data_shndx = idx;
1610			} else if (strcmp(name, ".rodata") == 0) {
1611				obj->efile.rodata = data;
1612				obj->efile.rodata_shndx = idx;
1613			} else {
1614				pr_debug("skip section(%d) %s\n", idx, name);
1615			}
1616		} else if (sh.sh_type == SHT_REL) {
1617			int nr_reloc = obj->efile.nr_reloc;
1618			void *reloc = obj->efile.reloc;
1619			int sec = sh.sh_info; /* points to other section */
1620
1621			/* Only do relo for section with exec instructions */
1622			if (!section_have_execinstr(obj, sec)) {
1623				pr_debug("skip relo %s(%d) for section(%d)\n",
1624					 name, idx, sec);
1625				continue;
1626			}
1627
1628			reloc = reallocarray(reloc, nr_reloc + 1,
1629					     sizeof(*obj->efile.reloc));
1630			if (!reloc) {
1631				pr_warning("realloc failed\n");
1632				return -ENOMEM;
1633			}
1634
1635			obj->efile.reloc = reloc;
1636			obj->efile.nr_reloc++;
1637
1638			obj->efile.reloc[nr_reloc].shdr = sh;
1639			obj->efile.reloc[nr_reloc].data = data;
1640		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
1641			obj->efile.bss = data;
1642			obj->efile.bss_shndx = idx;
1643		} else {
1644			pr_debug("skip section(%d) %s\n", idx, name);
1645		}
1646	}
1647
1648	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
1649		pr_warning("Corrupted ELF file: index of strtab invalid\n");
1650		return -LIBBPF_ERRNO__FORMAT;
1651	}
1652	err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
1653	if (!err)
1654		err = bpf_object__init_maps(obj, flags);
1655	if (!err)
1656		err = bpf_object__sanitize_and_load_btf(obj);
1657	if (!err)
1658		err = bpf_object__init_prog_names(obj);
1659	return err;
1660}
1661
1662static struct bpf_program *
1663bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
1664{
1665	struct bpf_program *prog;
1666	size_t i;
1667
1668	for (i = 0; i < obj->nr_programs; i++) {
1669		prog = &obj->programs[i];
1670		if (prog->idx == idx)
1671			return prog;
1672	}
1673	return NULL;
1674}
1675
1676struct bpf_program *
1677bpf_object__find_program_by_title(const struct bpf_object *obj,
1678				  const char *title)
1679{
1680	struct bpf_program *pos;
1681
1682	bpf_object__for_each_program(pos, obj) {
1683		if (pos->section_name && !strcmp(pos->section_name, title))
1684			return pos;
1685	}
1686	return NULL;
1687}
1688
1689static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
1690				      int shndx)
1691{
1692	return shndx == obj->efile.data_shndx ||
1693	       shndx == obj->efile.bss_shndx ||
1694	       shndx == obj->efile.rodata_shndx;
1695}
1696
1697static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
1698				      int shndx)
1699{
1700	return shndx == obj->efile.maps_shndx ||
1701	       shndx == obj->efile.btf_maps_shndx;
1702}
1703
1704static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
1705					      int shndx)
1706{
1707	return shndx == obj->efile.text_shndx ||
1708	       bpf_object__shndx_is_maps(obj, shndx) ||
1709	       bpf_object__shndx_is_data(obj, shndx);
1710}
1711
1712static enum libbpf_map_type
1713bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
1714{
1715	if (shndx == obj->efile.data_shndx)
1716		return LIBBPF_MAP_DATA;
1717	else if (shndx == obj->efile.bss_shndx)
1718		return LIBBPF_MAP_BSS;
1719	else if (shndx == obj->efile.rodata_shndx)
1720		return LIBBPF_MAP_RODATA;
1721	else
1722		return LIBBPF_MAP_UNSPEC;
1723}
1724
1725static int
1726bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1727			   Elf_Data *data, struct bpf_object *obj)
1728{
1729	Elf_Data *symbols = obj->efile.symbols;
1730	struct bpf_map *maps = obj->maps;
1731	size_t nr_maps = obj->nr_maps;
1732	int i, nrels;
1733
 1734	pr_debug("collecting relocation info for: '%s'\n", prog->section_name);
1735	nrels = shdr->sh_size / shdr->sh_entsize;
1736
1737	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
1738	if (!prog->reloc_desc) {
1739		pr_warning("failed to alloc memory in relocation\n");
1740		return -ENOMEM;
1741	}
1742	prog->nr_reloc = nrels;
1743
1744	for (i = 0; i < nrels; i++) {
1745		struct bpf_insn *insns = prog->insns;
1746		enum libbpf_map_type type;
1747		unsigned int insn_idx;
1748		unsigned int shdr_idx;
1749		const char *name;
1750		size_t map_idx;
1751		GElf_Sym sym;
1752		GElf_Rel rel;
1753
1754		if (!gelf_getrel(data, i, &rel)) {
1755			pr_warning("relocation: failed to get %d reloc\n", i);
1756			return -LIBBPF_ERRNO__FORMAT;
1757		}
1758
1759		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
1760			pr_warning("relocation: symbol %"PRIx64" not found\n",
1761				   GELF_R_SYM(rel.r_info));
1762			return -LIBBPF_ERRNO__FORMAT;
1763		}
1764
1765		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
1766				  sym.st_name) ? : "<?>";
1767
1768		pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
1769			 (long long) (rel.r_info >> 32),
1770			 (long long) sym.st_value, sym.st_name, name);
1771
1772		shdr_idx = sym.st_shndx;
1773		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
1774		pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
1775			 insn_idx, shdr_idx);
1776
1777		if (shdr_idx >= SHN_LORESERVE) {
1778			pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
1779				   name, shdr_idx, insn_idx,
1780				   insns[insn_idx].code);
1781			return -LIBBPF_ERRNO__RELOC;
1782		}
1783		if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
1784			pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
1785				   prog->section_name, shdr_idx);
1786			return -LIBBPF_ERRNO__RELOC;
1787		}
1788
1789		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
1790			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
1791				pr_warning("incorrect bpf_call opcode\n");
1792				return -LIBBPF_ERRNO__RELOC;
1793			}
1794			prog->reloc_desc[i].type = RELO_CALL;
1795			prog->reloc_desc[i].insn_idx = insn_idx;
1796			prog->reloc_desc[i].text_off = sym.st_value;
1797			obj->has_pseudo_calls = true;
1798			continue;
1799		}
1800
1801		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
1802			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
1803				   insn_idx, insns[insn_idx].code);
1804			return -LIBBPF_ERRNO__RELOC;
1805		}
1806
1807		if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
1808		    bpf_object__shndx_is_data(obj, shdr_idx)) {
1809			type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
1810			if (type != LIBBPF_MAP_UNSPEC) {
1811				if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
1812					pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
1813						   name, insn_idx, insns[insn_idx].code);
1814					return -LIBBPF_ERRNO__RELOC;
1815				}
1816				if (!obj->caps.global_data) {
1817					pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
1818						   name, insn_idx);
1819					return -LIBBPF_ERRNO__RELOC;
1820				}
1821			}
1822
1823			for (map_idx = 0; map_idx < nr_maps; map_idx++) {
1824				if (maps[map_idx].libbpf_type != type)
1825					continue;
1826				if (type != LIBBPF_MAP_UNSPEC ||
1827				    (maps[map_idx].sec_idx == sym.st_shndx &&
1828				     maps[map_idx].sec_offset == sym.st_value)) {
1829					pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
1830						 map_idx, maps[map_idx].name,
1831						 maps[map_idx].sec_idx,
1832						 maps[map_idx].sec_offset,
1833						 insn_idx);
1834					break;
1835				}
1836			}
1837
1838			if (map_idx >= nr_maps) {
1839				pr_warning("bpf relocation: map_idx %d larger than %d\n",
1840					   (int)map_idx, (int)nr_maps - 1);
1841				return -LIBBPF_ERRNO__RELOC;
1842			}
1843
1844			prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
1845						   RELO_DATA : RELO_LD64;
1846			prog->reloc_desc[i].insn_idx = insn_idx;
1847			prog->reloc_desc[i].map_idx = map_idx;
1848		}
1849	}
1850	return 0;
1851}
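
     /*
      * The reloc_desc entries recorded above are consumed at load time
      * (in code outside this excerpt): RELO_LD64/RELO_DATA patch the
      * ld_imm64 instruction at insn_idx with the target map's fd, while
      * RELO_CALL adjusts the call offset once .text has been appended to
      * the calling program.
      */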
1852
1853static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
1854{
1855	struct bpf_map_def *def = &map->def;
1856	__u32 key_type_id = 0, value_type_id = 0;
1857	int ret;
1858
1859	/* if it's BTF-defined map, we don't need to search for type IDs */
1860	if (map->sec_idx == obj->efile.btf_maps_shndx)
1861		return 0;
1862
1863	if (!bpf_map__is_internal(map)) {
1864		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
1865					   def->value_size, &key_type_id,
1866					   &value_type_id);
1867	} else {
1868		/*
1869		 * LLVM annotates global data differently in BTF, that is,
1870		 * only as '.data', '.bss' or '.rodata'.
1871		 */
1872		ret = btf__find_by_name(obj->btf,
1873				libbpf_type_to_btf_name[map->libbpf_type]);
1874	}
1875	if (ret < 0)
1876		return ret;
1877
1878	map->btf_key_type_id = key_type_id;
1879	map->btf_value_type_id = bpf_map__is_internal(map) ?
1880				 ret : value_type_id;
1881	return 0;
1882}
1883
1884int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1885{
1886	struct bpf_map_info info = {};
1887	__u32 len = sizeof(info);
1888	int new_fd, err;
1889	char *new_name;
1890
1891	err = bpf_obj_get_info_by_fd(fd, &info, &len);
1892	if (err)
1893		return err;
1894
1895	new_name = strdup(info.name);
1896	if (!new_name)
1897		return -errno;
1898
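     	/*
     	 * Reserve an unused fd number first, then dup3() the caller's fd
     	 * over it so the duplicate is created with O_CLOEXEC set
     	 * atomically.
     	 */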
1899	new_fd = open("/", O_RDONLY | O_CLOEXEC);
1900	if (new_fd < 0)
1901		goto err_free_new_name;
1902
1903	new_fd = dup3(fd, new_fd, O_CLOEXEC);
1904	if (new_fd < 0)
1905		goto err_close_new_fd;
1906
1907	err = zclose(map->fd);
1908	if (err)
1909		goto err_close_new_fd;
1910	free(map->name);
1911
1912	map->fd = new_fd;
1913	map->name = new_name;
1914	map->def.type = info.type;
1915	map->def.key_size = info.key_size;
1916	map->def.value_size = info.value_size;
1917	map->def.max_entries = info.max_entries;
1918	map->def.map_flags = info.map_flags;
1919	map->btf_key_type_id = info.btf_key_type_id;
1920	map->btf_value_type_id = info.btf_value_type_id;
1921
1922	return 0;
1923
1924err_close_new_fd:
1925	close(new_fd);
1926err_free_new_name:
1927	free(new_name);
1928	return -errno;
1929}
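
     /*
      * Illustrative usage (caller code; "/sys/fs/bpf/my_map" is a made-up
      * pin path): reuse an already pinned map instead of creating a new
      * one, after bpf_object__open() but before bpf_object__load():
      *
      *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
      *
      *	if (pin_fd >= 0)
      *		bpf_map__reuse_fd(map, pin_fd);
      */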
1930
1931int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
1932{
1933	if (!map || !max_entries)
1934		return -EINVAL;
1935
1936	/* If map already created, its attributes can't be changed. */
1937	if (map->fd >= 0)
1938		return -EBUSY;
1939
1940	map->def.max_entries = max_entries;
1941
1942	return 0;
1943}
1944
1945static int
1946bpf_object__probe_name(struct bpf_object *obj)
1947{
1948	struct bpf_load_program_attr attr;
1949	char *cp, errmsg[STRERR_BUFSIZE];
1950	struct bpf_insn insns[] = {
1951		BPF_MOV64_IMM(BPF_REG_0, 0),
1952		BPF_EXIT_INSN(),
1953	};
1954	int ret;
1955
1956	/* make sure basic loading works */
1957
1958	memset(&attr, 0, sizeof(attr));
1959	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1960	attr.insns = insns;
1961	attr.insns_cnt = ARRAY_SIZE(insns);
1962	attr.license = "GPL";
1963
1964	ret = bpf_load_program_xattr(&attr, NULL, 0);
1965	if (ret < 0) {
1966		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1967		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1968			   __func__, cp, errno);
1969		return -errno;
1970	}
1971	close(ret);
1972
1973	/* now try the same program, but with the name */
1974
1975	attr.name = "test";
1976	ret = bpf_load_program_xattr(&attr, NULL, 0);
1977	if (ret >= 0) {
1978		obj->caps.name = 1;
1979		close(ret);
1980	}
1981
1982	return 0;
1983}
1984
1985static int
1986bpf_object__probe_global_data(struct bpf_object *obj)
1987{
1988	struct bpf_load_program_attr prg_attr;
1989	struct bpf_create_map_attr map_attr;
1990	char *cp, errmsg[STRERR_BUFSIZE];
1991	struct bpf_insn insns[] = {
1992		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
1993		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
1994		BPF_MOV64_IMM(BPF_REG_0, 0),
1995		BPF_EXIT_INSN(),
1996	};
1997	int ret, map;
1998
1999	memset(&map_attr, 0, sizeof(map_attr));
2000	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
2001	map_attr.key_size = sizeof(int);
2002	map_attr.value_size = 32;
2003	map_attr.max_entries = 1;
2004
2005	map = bpf_create_map_xattr(&map_attr);
2006	if (map < 0) {
2007		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2008		pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
2009			   __func__, cp, errno);
2010		return -errno;
2011	}
2012
2013	insns[0].imm = map;
2014
2015	memset(&prg_attr, 0, sizeof(prg_attr));
2016	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
2017	prg_attr.insns = insns;
2018	prg_attr.insns_cnt = ARRAY_SIZE(insns);
2019	prg_attr.license = "GPL";
2020
2021	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
2022	if (ret >= 0) {
2023		obj->caps.global_data = 1;
2024		close(ret);
2025	}
2026
2027	close(map);
2028	return 0;
2029}
2030
2031static int bpf_object__probe_btf_func(struct bpf_object *obj)
2032{
2033	const char strs[] = "\0int\0x\0a";
2034	/* void x(int a) {} */
2035	__u32 types[] = {
2036		/* int */
2037		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
2038		/* FUNC_PROTO */                                /* [2] */
2039		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
2040		BTF_PARAM_ENC(7, 1),
2041		/* FUNC x */                                    /* [3] */
2042		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
2043	};
2044	int btf_fd;
2045
2046	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2047				      strs, sizeof(strs));
2048	if (btf_fd >= 0) {
2049		obj->caps.btf_func = 1;
2050		close(btf_fd);
2051		return 1;
2052	}
2053
2054	return 0;
2055}
2056
2057static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
2058{
2059	const char strs[] = "\0x\0.data";
2060	/* static int a; */
2061	__u32 types[] = {
2062		/* int */
2063		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
2064		/* VAR x */                                     /* [2] */
2065		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
2066		BTF_VAR_STATIC,
2067		/* DATASEC val */                               /* [3] */
2068		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
2069		BTF_VAR_SECINFO_ENC(2, 0, 4),
2070	};
2071	int btf_fd;
2072
2073	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2074				      strs, sizeof(strs));
2075	if (btf_fd >= 0) {
2076		obj->caps.btf_datasec = 1;
2077		close(btf_fd);
2078		return 1;
2079	}
2080
2081	return 0;
2082}
2083
2084static int
2085bpf_object__probe_caps(struct bpf_object *obj)
2086{
2087	int (*probe_fn[])(struct bpf_object *obj) = {
2088		bpf_object__probe_name,
2089		bpf_object__probe_global_data,
2090		bpf_object__probe_btf_func,
2091		bpf_object__probe_btf_datasec,
2092	};
2093	int i, ret;
2094
2095	for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
2096		ret = probe_fn[i](obj);
2097		if (ret < 0)
2098			pr_debug("Probe #%d failed with %d.\n", i, ret);
2099	}
2100
2101	return 0;
2102}
2103
2104static int
2105bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
2106{
2107	char *cp, errmsg[STRERR_BUFSIZE];
2108	int err, zero = 0;
2109	__u8 *data;
2110
2111	/* Nothing to do here since kernel already zero-initializes .bss map. */
2112	if (map->libbpf_type == LIBBPF_MAP_BSS)
2113		return 0;
2114
2115	data = map->libbpf_type == LIBBPF_MAP_DATA ?
2116	       obj->sections.data : obj->sections.rodata;
2117
2118	err = bpf_map_update_elem(map->fd, &zero, data, 0);
2119	/* Freeze .rodata map as read-only from syscall side. */
2120	if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
2121		err = bpf_map_freeze(map->fd);
2122		if (err) {
2123			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2124			pr_warning("Error freezing map(%s) as read-only: %s\n",
2125				   map->name, cp);
2126			err = 0;
2127		}
2128	}
2129	return err;
2130}
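
/* Illustration (hypothetical BPF-side globals, not from this file):
 *
 *   int cfg_verbose = 1;        // placed in .data
 *   const int cfg_limit = 64;   // placed in .rodata, frozen above
 *
 * Each such section becomes a single-entry array map; the value at key 0
 * is the section image copied in by the bpf_map_update_elem() call above.
 */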
2131
2132static int
2133bpf_object__create_maps(struct bpf_object *obj)
2134{
2135	struct bpf_create_map_attr create_attr = {};
2136	int nr_cpus = 0;
2137	unsigned int i;
2138	int err;
2139
2140	for (i = 0; i < obj->nr_maps; i++) {
2141		struct bpf_map *map = &obj->maps[i];
2142		struct bpf_map_def *def = &map->def;
2143		char *cp, errmsg[STRERR_BUFSIZE];
2144		int *pfd = &map->fd;
2145
2146		if (map->fd >= 0) {
2147			pr_debug("skip map create (preset) %s: fd=%d\n",
2148				 map->name, map->fd);
2149			continue;
2150		}
2151
2152		if (obj->caps.name)
2153			create_attr.name = map->name;
2154		create_attr.map_ifindex = map->map_ifindex;
2155		create_attr.map_type = def->type;
2156		create_attr.map_flags = def->map_flags;
2157		create_attr.key_size = def->key_size;
2158		create_attr.value_size = def->value_size;
2159		if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
2160		    !def->max_entries) {
2161			if (!nr_cpus)
2162				nr_cpus = libbpf_num_possible_cpus();
2163			if (nr_cpus < 0) {
2164				pr_warning("failed to determine number of system CPUs: %d\n",
2165					   nr_cpus);
2166				err = nr_cpus;
2167				goto err_out;
2168			}
2169			pr_debug("map '%s': setting size to %d\n",
2170				 map->name, nr_cpus);
2171			create_attr.max_entries = nr_cpus;
2172		} else {
2173			create_attr.max_entries = def->max_entries;
2174		}
2175		create_attr.btf_fd = 0;
2176		create_attr.btf_key_type_id = 0;
2177		create_attr.btf_value_type_id = 0;
2178		if (bpf_map_type__is_map_in_map(def->type) &&
2179		    map->inner_map_fd >= 0)
2180			create_attr.inner_map_fd = map->inner_map_fd;
2181
2182		if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
2183			create_attr.btf_fd = btf__fd(obj->btf);
2184			create_attr.btf_key_type_id = map->btf_key_type_id;
2185			create_attr.btf_value_type_id = map->btf_value_type_id;
2186		}
2187
2188		*pfd = bpf_create_map_xattr(&create_attr);
2189		if (*pfd < 0 && (create_attr.btf_key_type_id ||
2190				 create_attr.btf_value_type_id)) {
2191			err = -errno;
2192			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
2193			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
2194				   map->name, cp, err);
2195			create_attr.btf_fd = 0;
2196			create_attr.btf_key_type_id = 0;
2197			create_attr.btf_value_type_id = 0;
2198			map->btf_key_type_id = 0;
2199			map->btf_value_type_id = 0;
2200			*pfd = bpf_create_map_xattr(&create_attr);
2201		}
2202
2203		if (*pfd < 0) {
2204			size_t j;
2205
2206			err = -errno;
2207err_out:
2208			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
2209			pr_warning("failed to create map (name: '%s'): %s(%d)\n",
2210				   map->name, cp, err);
2211			for (j = 0; j < i; j++)
2212				zclose(obj->maps[j].fd);
2213			return err;
2214		}
2215
2216		if (bpf_map__is_internal(map)) {
2217			err = bpf_object__populate_internal_map(obj, map);
2218			if (err < 0) {
2219				zclose(*pfd);
2220				goto err_out;
2221			}
2222		}
2223
2224		pr_debug("created map %s: fd=%d\n", map->name, *pfd);
2225	}
2226
2227	return 0;
2228}
2229
2230static int
2231check_btf_ext_reloc_err(struct bpf_program *prog, int err,
2232			void *btf_prog_info, const char *info_name)
2233{
2234	if (err != -ENOENT) {
2235		pr_warning("Error in loading %s for sec %s.\n",
2236			   info_name, prog->section_name);
2237		return err;
2238	}
2239
2240	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
2241
2242	if (btf_prog_info) {
2243		/*
2244		 * Some info has already been found, but the last
2245		 * btf_ext reloc has a problem. Must error out.
2246		 */
2247		pr_warning("Error in relocating %s for sec %s.\n",
2248			   info_name, prog->section_name);
2249		return err;
2250	}
2251
2252	/* Failed to load the very first info. Ignore the rest. */
2253	pr_warning("Cannot find %s for main program sec %s. Ignoring all %s.\n",
2254		   info_name, prog->section_name, info_name);
2255	return 0;
2256}
2257
2258static int
2259bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2260			  const char *section_name, __u32 insn_offset)
2261{
2262	int err;
2263
2264	if (!insn_offset || prog->func_info) {
2265		/*
2266		 * !insn_offset => main program
2267		 *
2268		 * For sub prog, the main program's func_info has to
2269		 * be loaded first (i.e. prog->func_info != NULL)
2270		 */
2271		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
2272					       section_name, insn_offset,
2273					       &prog->func_info,
2274					       &prog->func_info_cnt);
2275		if (err)
2276			return check_btf_ext_reloc_err(prog, err,
2277						       prog->func_info,
2278						       "bpf_func_info");
2279
2280		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
2281	}
2282
2283	if (!insn_offset || prog->line_info) {
2284		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
2285					       section_name, insn_offset,
2286					       &prog->line_info,
2287					       &prog->line_info_cnt);
2288		if (err)
2289			return check_btf_ext_reloc_err(prog, err,
2290						       prog->line_info,
2291						       "bpf_line_info");
2292
2293		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2294	}
2295
2296	return 0;
2297}
2298
2299#define BPF_CORE_SPEC_MAX_LEN 64
2300
2301/* represents BPF CO-RE field or array element accessor */
2302struct bpf_core_accessor {
2303	__u32 type_id;		/* struct/union type or array element type */
2304	__u32 idx;		/* field index or array index */
2305	const char *name;	/* field name or NULL for array accessor */
2306};
2307
2308struct bpf_core_spec {
2309	const struct btf *btf;
2310	/* high-level spec: named fields and array indices only */
2311	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
2312	/* high-level spec length */
2313	int len;
2314	/* raw, low-level spec: 1-to-1 with accessor spec string */
2315	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
2316	/* raw spec length */
2317	int raw_len;
2318	/* field byte offset represented by spec */
2319	__u32 offset;
2320};
2321
2322static bool str_is_empty(const char *s)
2323{
2324	return !s || !s[0];
2325}
2326
2327/*
2328 * Turn bpf_offset_reloc into a low- and high-level spec representation,
2329 * validating correctness along the way, as well as calculating resulting
2330 * field offset (in bytes), specified by accessor string. Low-level spec
2331 * captures every single level of nestedness, including traversing anonymous
2332 * struct/union members. High-level one only captures semantically meaningful
2333 * "turning points": named fields and array indices.
2334 * E.g., for this case:
2335 *
2336 *   struct sample {
2337 *       int __unimportant;
2338 *       struct {
2339 *           int __1;
2340 *           int __2;
2341 *           int a[7];
2342 *       };
2343 *   };
2344 *
2345 *   struct sample *s = ...;
2346 *
2347 *   int *x = &s->a[3]; // access string = '0:1:2:3'
2348 *
2349 * Low-level spec has 1:1 mapping with each element of access string (it's
2350 * just a parsed access string representation): [0, 1, 2, 3].
2351 *
2352 * High-level spec will capture only 3 points:
2353 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
2354 *   - field 'a' access (corresponds to '2' in low-level spec);
2355 *   - array element #3 access (corresponds to '3' in low-level spec).
2356 *
2357 */
2358static int bpf_core_spec_parse(const struct btf *btf,
2359			       __u32 type_id,
2360			       const char *spec_str,
2361			       struct bpf_core_spec *spec)
2362{
2363	int access_idx, parsed_len, i;
2364	const struct btf_type *t;
2365	const char *name;
2366	__u32 id;
2367	__s64 sz;
2368
2369	if (str_is_empty(spec_str) || *spec_str == ':')
2370		return -EINVAL;
2371
2372	memset(spec, 0, sizeof(*spec));
2373	spec->btf = btf;
2374
2375	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
2376	while (*spec_str) {
2377		if (*spec_str == ':')
2378			++spec_str;
2379		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
2380			return -EINVAL;
2381		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2382			return -E2BIG;
2383		spec_str += parsed_len;
2384		spec->raw_spec[spec->raw_len++] = access_idx;
2385	}
2386
2387	if (spec->raw_len == 0)
2388		return -EINVAL;
2389
2390	/* first spec value is always reloc type array index */
2391	t = skip_mods_and_typedefs(btf, type_id, &id);
2392	if (!t)
2393		return -EINVAL;
2394
2395	access_idx = spec->raw_spec[0];
2396	spec->spec[0].type_id = id;
2397	spec->spec[0].idx = access_idx;
2398	spec->len++;
2399
2400	sz = btf__resolve_size(btf, id);
2401	if (sz < 0)
2402		return sz;
2403	spec->offset = access_idx * sz;
2404
2405	for (i = 1; i < spec->raw_len; i++) {
2406		t = skip_mods_and_typedefs(btf, id, &id);
2407		if (!t)
2408			return -EINVAL;
2409
2410		access_idx = spec->raw_spec[i];
2411
2412		if (btf_is_composite(t)) {
2413			const struct btf_member *m;
2414			__u32 offset;
2415
2416			if (access_idx >= btf_vlen(t))
2417				return -EINVAL;
2418			if (btf_member_bitfield_size(t, access_idx))
2419				return -EINVAL;
2420
2421			offset = btf_member_bit_offset(t, access_idx);
2422			if (offset % 8)
2423				return -EINVAL;
2424			spec->offset += offset / 8;
2425
2426			m = btf_members(t) + access_idx;
2427			if (m->name_off) {
2428				name = btf__name_by_offset(btf, m->name_off);
2429				if (str_is_empty(name))
2430					return -EINVAL;
2431
2432				spec->spec[spec->len].type_id = id;
2433				spec->spec[spec->len].idx = access_idx;
2434				spec->spec[spec->len].name = name;
2435				spec->len++;
2436			}
2437
2438			id = m->type;
2439		} else if (btf_is_array(t)) {
2440			const struct btf_array *a = btf_array(t);
2441
2442			t = skip_mods_and_typedefs(btf, a->type, &id);
2443			if (!t || access_idx >= a->nelems)
2444				return -EINVAL;
2445
2446			spec->spec[spec->len].type_id = id;
2447			spec->spec[spec->len].idx = access_idx;
2448			spec->len++;
2449
2450			sz = btf__resolve_size(btf, id);
2451			if (sz < 0)
2452				return sz;
2453			spec->offset += access_idx * sz;
2454		} else {
2455			pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
2456				   type_id, spec_str, i, id, btf_kind(t));
2457			return -EINVAL;
2458		}
2459	}
2460
2461	return 0;
2462}
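
/* Worked example for the 'struct sample' shown in the comment above
 * (illustration only; offsets follow from a 4-byte int):
 *
 *   raw spec [0, 1, 2, 3] yields
 *     0 * sizeof(struct sample)   =  0  (initial pointer access)
 *   + anon struct offset          =  4  (past __unimportant)
 *   + offset of 'a' within it     =  8  (past __1 and __2)
 *   + 3 * sizeof(int)             = 12  (array element #3)
 *   so spec->offset               = 24
 */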
2463
2464static bool bpf_core_is_flavor_sep(const char *s)
2465{
2466	/* check X___Y name pattern, where X and Y are not underscores */
2467	return s[0] != '_' &&				      /* X */
2468	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
2469	       s[4] != '_';				      /* Y */
2470}
2471
2472/* Given 'some_struct_name___with_flavor', return the length of the name
2473 * prefix before the last triple underscore. The struct name part after the
2474 * last triple underscore is ignored during BPF CO-RE relocation matching.
2475 */
2476static size_t bpf_core_essential_name_len(const char *name)
2477{
2478	size_t n = strlen(name);
2479	int i;
2480
2481	for (i = n - 5; i >= 0; i--) {
2482		if (bpf_core_is_flavor_sep(name + i))
2483			return i + 1;
2484	}
2485	return n;
2486}
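
/* For illustration (type names are made up):
 *
 *   bpf_core_essential_name_len("task_struct")        -> 11 (no flavor)
 *   bpf_core_essential_name_len("task_struct___v5_2") -> 11
 *   bpf_core_essential_name_len("sample___flavor")    ->  6
 */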
2487
2488/* dynamically sized list of type IDs */
2489struct ids_vec {
2490	__u32 *data;
2491	int len;
2492};
2493
2494static void bpf_core_free_cands(struct ids_vec *cand_ids)
2495{
2496	free(cand_ids->data);
2497	free(cand_ids);
2498}
2499
2500static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
2501					   __u32 local_type_id,
2502					   const struct btf *targ_btf)
2503{
2504	size_t local_essent_len, targ_essent_len;
2505	const char *local_name, *targ_name;
2506	const struct btf_type *t;
2507	struct ids_vec *cand_ids;
2508	__u32 *new_ids;
2509	int i, err, n;
2510
2511	t = btf__type_by_id(local_btf, local_type_id);
2512	if (!t)
2513		return ERR_PTR(-EINVAL);
2514
2515	local_name = btf__name_by_offset(local_btf, t->name_off);
2516	if (str_is_empty(local_name))
2517		return ERR_PTR(-EINVAL);
2518	local_essent_len = bpf_core_essential_name_len(local_name);
2519
2520	cand_ids = calloc(1, sizeof(*cand_ids));
2521	if (!cand_ids)
2522		return ERR_PTR(-ENOMEM);
2523
2524	n = btf__get_nr_types(targ_btf);
2525	for (i = 1; i <= n; i++) {
2526		t = btf__type_by_id(targ_btf, i);
2527		targ_name = btf__name_by_offset(targ_btf, t->name_off);
2528		if (str_is_empty(targ_name))
2529			continue;
2530
2531		targ_essent_len = bpf_core_essential_name_len(targ_name);
2532		if (targ_essent_len != local_essent_len)
2533			continue;
2534
2535		if (strncmp(local_name, targ_name, local_essent_len) == 0) {
2536			pr_debug("[%d] %s: found candidate [%d] %s\n",
2537				 local_type_id, local_name, i, targ_name);
2538			new_ids = realloc(cand_ids->data, sizeof(*new_ids) * (cand_ids->len + 1));
2539			if (!new_ids) {
2540				err = -ENOMEM;
2541				goto err_out;
2542			}
2543			cand_ids->data = new_ids;
2544			cand_ids->data[cand_ids->len++] = i;
2545		}
2546	}
2547	return cand_ids;
2548err_out:
2549	bpf_core_free_cands(cand_ids);
2550	return ERR_PTR(err);
2551}
2552
2553/* Check two types for compatibility, skipping const/volatile/restrict and
2554 * typedefs, to ensure we are relocating the offset to compatible entities:
2555 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
2556 *   - any two FWDs are compatible;
2557 *   - any two PTRs are always compatible;
2558 *   - for ENUMs, check sizes, names are ignored;
2559 *   - for INT, size and bitness should match, signedness is ignored;
2560 *   - for ARRAY, dimensionality is ignored, element types are checked for
2561 *     compatibility recursively;
2562 *   - everything else shouldn't be ever a target of relocation.
2563 * These rules are not set in stone and probably will be adjusted as we get
2564 * more experience with using BPF CO-RE relocations.
2565 */
2566static int bpf_core_fields_are_compat(const struct btf *local_btf,
2567				      __u32 local_id,
2568				      const struct btf *targ_btf,
2569				      __u32 targ_id)
2570{
2571	const struct btf_type *local_type, *targ_type;
2572
2573recur:
2574	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
2575	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2576	if (!local_type || !targ_type)
2577		return -EINVAL;
2578
2579	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
2580		return 1;
2581	if (btf_kind(local_type) != btf_kind(targ_type))
2582		return 0;
2583
2584	switch (btf_kind(local_type)) {
2585	case BTF_KIND_FWD:
2586	case BTF_KIND_PTR:
2587		return 1;
2588	case BTF_KIND_ENUM:
2589		return local_type->size == targ_type->size;
2590	case BTF_KIND_INT:
2591		return btf_int_offset(local_type) == 0 &&
2592		       btf_int_offset(targ_type) == 0 &&
2593		       local_type->size == targ_type->size &&
2594		       btf_int_bits(local_type) == btf_int_bits(targ_type);
2595	case BTF_KIND_ARRAY:
2596		local_id = btf_array(local_type)->type;
2597		targ_id = btf_array(targ_type)->type;
2598		goto recur;
2599	default:
2600		pr_warning("unexpected kind %d relocated, local [%d], target [%d]\n",
2601			   btf_kind(local_type), local_id, targ_id);
2602		return 0;
2603	}
2604}
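
/* Illustration of the rules above (hypothetical local/target pairs):
 *
 *   struct a   vs union b      -> compatible (any two composites);
 *   32-bit int vs 32-bit int   -> compatible (size and bitness match);
 *   32-bit int vs 64-bit int   -> not compatible (size differs);
 *   int[5]     vs int[10]      -> compatible (dimensionality ignored,
 *                                 element types checked recursively).
 */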
2605
2606/*
2607 * Given a single high-level named field accessor in a local type, find the
2608 * corresponding high-level accessor for a target type. Along the way,
2609 * maintain the low-level spec for the target as well, and keep updating
2610 * the target offset.
2611 *
2612 * Searching is performed through recursive exhaustive enumeration of all
2613 * fields of a struct/union. If there are any anonymous (embedded)
2614 * structs/unions, they are recursively searched as well. If field with
2615 * desired name is found, check compatibility between local and target types,
2616 * before returning result.
2617 *
2618 * 1 is returned if the field is found.
2619 * 0 is returned if no compatible field is found.
2620 * <0 is returned on error.
2621 */
2622static int bpf_core_match_member(const struct btf *local_btf,
2623				 const struct bpf_core_accessor *local_acc,
2624				 const struct btf *targ_btf,
2625				 __u32 targ_id,
2626				 struct bpf_core_spec *spec,
2627				 __u32 *next_targ_id)
2628{
2629	const struct btf_type *local_type, *targ_type;
2630	const struct btf_member *local_member, *m;
2631	const char *local_name, *targ_name;
2632	__u32 local_id;
2633	int i, n, found;
2634
2635	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2636	if (!targ_type)
2637		return -EINVAL;
2638	if (!btf_is_composite(targ_type))
2639		return 0;
2640
2641	local_id = local_acc->type_id;
2642	local_type = btf__type_by_id(local_btf, local_id);
2643	local_member = btf_members(local_type) + local_acc->idx;
2644	local_name = btf__name_by_offset(local_btf, local_member->name_off);
2645
2646	n = btf_vlen(targ_type);
2647	m = btf_members(targ_type);
2648	for (i = 0; i < n; i++, m++) {
2649		__u32 offset;
2650
2651		/* bitfield relocations not supported */
2652		if (btf_member_bitfield_size(targ_type, i))
2653			continue;
2654		offset = btf_member_bit_offset(targ_type, i);
2655		if (offset % 8)
2656			continue;
2657
2658		/* too deep struct/union/array nesting */
2659		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2660			return -E2BIG;
2661
2662		/* speculate this member will be the good one */
2663		spec->offset += offset / 8;
2664		spec->raw_spec[spec->raw_len++] = i;
2665
2666		targ_name = btf__name_by_offset(targ_btf, m->name_off);
2667		if (str_is_empty(targ_name)) {
2668			/* embedded struct/union, we need to go deeper */
2669			found = bpf_core_match_member(local_btf, local_acc,
2670						      targ_btf, m->type,
2671						      spec, next_targ_id);
2672			if (found) /* either found or error */
2673				return found;
2674		} else if (strcmp(local_name, targ_name) == 0) {
2675			/* matching named field */
2676			struct bpf_core_accessor *targ_acc;
2677
2678			targ_acc = &spec->spec[spec->len++];
2679			targ_acc->type_id = targ_id;
2680			targ_acc->idx = i;
2681			targ_acc->name = targ_name;
2682
2683			*next_targ_id = m->type;
2684			found = bpf_core_fields_are_compat(local_btf,
2685							   local_member->type,
2686							   targ_btf, m->type);
2687			if (!found)
2688				spec->len--; /* pop accessor */
2689			return found;
2690		}
2691		/* member turned out not to be what we looked for */
2692		spec->offset -= offset / 8;
2693		spec->raw_len--;
2694	}
2695
2696	return 0;
2697}
2698
2699/*
2700 * Try to match local spec to a target type and, if successful, produce full
2701 * target spec (high-level, low-level + offset).
2702 */
2703static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
2704			       const struct btf *targ_btf, __u32 targ_id,
2705			       struct bpf_core_spec *targ_spec)
2706{
2707	const struct btf_type *targ_type;
2708	const struct bpf_core_accessor *local_acc;
2709	struct bpf_core_accessor *targ_acc;
2710	int i, sz, matched;
2711
2712	memset(targ_spec, 0, sizeof(*targ_spec));
2713	targ_spec->btf = targ_btf;
2714
2715	local_acc = &local_spec->spec[0];
2716	targ_acc = &targ_spec->spec[0];
2717
2718	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
2719		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
2720						   &targ_id);
2721		if (!targ_type)
2722			return -EINVAL;
2723
2724		if (local_acc->name) {
2725			matched = bpf_core_match_member(local_spec->btf,
2726							local_acc,
2727							targ_btf, targ_id,
2728							targ_spec, &targ_id);
2729			if (matched <= 0)
2730				return matched;
2731		} else {
2732			/* for i=0, targ_id is already treated as array element
2733			 * type (because it's the original struct), for others
2734			 * we should find array element type first
2735			 */
2736			if (i > 0) {
2737				const struct btf_array *a;
2738
2739				if (!btf_is_array(targ_type))
2740					return 0;
2741
2742				a = btf_array(targ_type);
2743				if (local_acc->idx >= a->nelems)
2744					return 0;
2745				if (!skip_mods_and_typedefs(targ_btf, a->type,
2746							    &targ_id))
2747					return -EINVAL;
2748			}
2749
2750			/* too deep struct/union/array nesting */
2751			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2752				return -E2BIG;
2753
2754			targ_acc->type_id = targ_id;
2755			targ_acc->idx = local_acc->idx;
2756			targ_acc->name = NULL;
2757			targ_spec->len++;
2758			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
2759			targ_spec->raw_len++;
2760
2761			sz = btf__resolve_size(targ_btf, targ_id);
2762			if (sz < 0)
2763				return sz;
2764			targ_spec->offset += local_acc->idx * sz;
2765		}
2766	}
2767
2768	return 1;
2769}
2770
2771/*
2772 * Patch relocatable BPF instruction.
2773 * Expected insn->imm value is provided for validation, as well as the new
2774 * relocated value.
2775 *
2776 * Currently only ALU/ALU64 instructions with an immediate (BPF_K) operand
2777 * are patched:
2778 * 1. rX = <imm> (assignment with immediate operand);
2779 * 2. rX += <imm> (arithmetic operations with immediate operand).
2780 *
2781 * If actual insn->imm value is wrong, bail out.
2782 */
2783static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off,
2784			       __u32 orig_off, __u32 new_off)
2785{
2786	struct bpf_insn *insn;
2787	int insn_idx;
2788	__u8 class;
2789
2790	if (insn_off % sizeof(struct bpf_insn))
2791		return -EINVAL;
2792	insn_idx = insn_off / sizeof(struct bpf_insn);
2793
2794	insn = &prog->insns[insn_idx];
2795	class = BPF_CLASS(insn->code);
2796
2797	if (class == BPF_ALU || class == BPF_ALU64) {
2798		if (BPF_SRC(insn->code) != BPF_K)
2799			return -EINVAL;
2800		if (insn->imm != orig_off)
2801			return -EINVAL;
2802		insn->imm = new_off;
2803		pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
2804			 bpf_program__title(prog, false),
2805			 insn_idx, orig_off, new_off);
2806	} else {
2807		pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
2808			   bpf_program__title(prog, false),
2809			   insn_idx, insn->code, insn->src_reg, insn->dst_reg,
2810			   insn->off, insn->imm);
2811		return -EINVAL;
2812	}
2813	return 0;
2814}
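
/* Example of the effect (offsets are illustrative): a CO-RE field access
 * compiled as "r1 += 8" (field offset 8 in local BTF) is rewritten in
 * place to "r1 += 16" before load, if target BTF resolves the same field
 * to offset 16. The orig_off check above guards against patching an insn
 * that doesn't carry the expected local value.
 */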
2815
2816static struct btf *btf_load_raw(const char *path)
2817{
2818	struct btf *btf;
2819	size_t read_cnt;
2820	struct stat st;
2821	void *data;
2822	FILE *f;
2823
2824	if (stat(path, &st))
2825		return ERR_PTR(-errno);
2826
2827	data = malloc(st.st_size);
2828	if (!data)
2829		return ERR_PTR(-ENOMEM);
2830
2831	f = fopen(path, "rb");
2832	if (!f) {
2833		btf = ERR_PTR(-errno);
2834		goto cleanup;
2835	}
2836
2837	read_cnt = fread(data, 1, st.st_size, f);
2838	fclose(f);
2839	if (read_cnt < st.st_size) {
2840		btf = ERR_PTR(-EBADF);
2841		goto cleanup;
2842	}
2843
2844	btf = btf__new(data, read_cnt);
2845
2846cleanup:
2847	free(data);
2848	return btf;
2849}
2850
2851/*
2852 * Probe a few well-known locations for the vmlinux kernel image and try to
2853 * load BTF data out of it, to be used as target BTF.
2854 */
2855static struct btf *bpf_core_find_kernel_btf(void)
2856{
2857	struct {
2858		const char *path_fmt;
2859		bool raw_btf;
2860	} locations[] = {
2861		/* try canonical vmlinux BTF through sysfs first */
2862		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
2863		/* fall back to trying to find vmlinux ELF on disk otherwise */
2864		{ "/boot/vmlinux-%1$s" },
2865		{ "/lib/modules/%1$s/vmlinux-%1$s" },
2866		{ "/lib/modules/%1$s/build/vmlinux" },
2867		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
2868		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
2869		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
2870		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
2871	};
2872	char path[PATH_MAX + 1];
2873	struct utsname buf;
2874	struct btf *btf;
2875	int i;
2876
2877	uname(&buf);
2878
2879	for (i = 0; i < ARRAY_SIZE(locations); i++) {
2880		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
2881
2882		if (access(path, R_OK))
2883			continue;
2884
2885		if (locations[i].raw_btf)
2886			btf = btf_load_raw(path);
2887		else
2888			btf = btf__parse_elf(path, NULL);
2889
2890		pr_debug("loading kernel BTF '%s': %ld\n",
2891			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
2892		if (IS_ERR(btf))
2893			continue;
2894
2895		return btf;
2896	}
2897
2898	pr_warning("failed to find valid kernel BTF\n");
2899	return ERR_PTR(-ESRCH);
2900}
2901
2902/* Output spec definition in the format:
2903 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
2904 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
2905 */
2906static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
2907{
2908	const struct btf_type *t;
2909	const char *s;
2910	__u32 type_id;
2911	int i;
2912
2913	type_id = spec->spec[0].type_id;
2914	t = btf__type_by_id(spec->btf, type_id);
2915	s = btf__name_by_offset(spec->btf, t->name_off);
2916	libbpf_print(level, "[%u] %s + ", type_id, s);
2917
2918	for (i = 0; i < spec->raw_len; i++)
2919		libbpf_print(level, "%d%s", spec->raw_spec[i],
2920			     i == spec->raw_len - 1 ? " => " : ":");
2921
2922	libbpf_print(level, "%u @ &x", spec->offset);
2923
2924	for (i = 0; i < spec->len; i++) {
2925		if (spec->spec[i].name)
2926			libbpf_print(level, ".%s", spec->spec[i].name);
2927		else
2928			libbpf_print(level, "[%u]", spec->spec[i].idx);
2929	}
2930
2931}
2932
2933static size_t bpf_core_hash_fn(const void *key, void *ctx)
2934{
2935	return (size_t)key;
2936}
2937
2938static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
2939{
2940	return k1 == k2;
2941}
2942
2943static void *u32_as_hash_key(__u32 x)
2944{
2945	return (void *)(uintptr_t)x;
2946}
2947
2948/*
2949 * CO-RE relocate single instruction.
2950 *
2951 * The outline and important points of the algorithm:
2952 * 1. For given local type, find corresponding candidate target types.
2953 *    Candidate type is a type with the same "essential" name, ignoring
2954 *    everything after last triple underscore (___). E.g., `sample`,
2955 *    `sample___flavor_one`, `sample___flavor_another_one` are all candidates
2956 *    for each other. Names with triple underscore are referred to as
2957 *    "flavors" and are useful, among other things, for specifying and
2958 *    supporting incompatible variations of the same kernel struct, which
2959 *    might differ between different kernel versions and/or build
2960 *    configurations.
2961 *
2962 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
2963 *    converter, when deduplicated BTF of a kernel still contains more than
2964 *    one distinct type with the same name. In that case, ___2, ___3, etc.
2965 *    are appended starting from the second name conflict. But struct
2966 *    flavors are also useful when defined "locally", in a BPF program, to
2967 *    extract the same data across incompatible changes between different
2968 *    kernel versions/configurations. For instance, to handle field renames
2969 *    between kernel versions, one can define two flavors of the struct with
2970 *    the same common name and use conditional relocations to extract that
2971 *    field, depending on target kernel version (see the sketch below).
2972 * 2. For each candidate type, try to match local specification to this
2973 *    candidate target type. Matching involves finding corresponding
2974 *    high-level spec accessors, meaning that all named fields should match,
2975 *    as well as all array accesses should be within the actual bounds. Also,
2976 *    types should be compatible (see bpf_core_fields_are_compat for details).
2977 * 3. It is supported and expected that there might be multiple flavors
2978 *    matching the spec. As long as all the specs resolve to the same set of
2979 *    offsets across all candidates, there is no error. If there is any
2980 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
2981 *    imperfections of BTF deduplication, which can cause slight duplication of
2982 *    the same BTF type, if some directly or indirectly referenced (by
2983 *    pointer) type gets resolved to different actual types in different
2984 *    object files. If such a situation occurs, deduplicated BTF will end up
2985 *    with two (or more) structurally identical types, which differ only in
2986 *    types they refer to through pointer. This should be OK in most cases and
2987 *    is not an error.
2988 * 4. Candidate types search is performed by linearly scanning through all
2989 *    types in target BTF. It is anticipated that this is overall more
2990 *    efficient memory-wise and not significantly worse (if not better)
2991 *    CPU-wise compared to prebuilding a map from all local type names to
2992 *    a list of candidate type names. It's also sped up by caching resolved
2993 *    list of matching candidates for each local "root" type ID that has at
2994 *    least one bpf_offset_reloc associated with it. This list is shared
2995 *    between multiple relocations for the same type ID and is updated as some
2996 *    of the candidates are pruned due to structural incompatibility.
2997 */
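
/* A sketch of "local" flavor use (all names here are hypothetical, not
 * taken from any real kernel): suppose a field was renamed from 'state'
 * to '__state' between kernel versions. A BPF program can define both
 * flavors of the type:
 *
 *   struct task___old { int state; } *t_old;
 *   struct task___new { int __state; } *t_new;
 *
 * Both relocate against the same essential name 'task'; conditional
 * program logic then uses whichever access matches the running kernel,
 * as described in point 1 above.
 */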
2998static int bpf_core_reloc_offset(struct bpf_program *prog,
2999				 const struct bpf_offset_reloc *relo,
3000				 int relo_idx,
3001				 const struct btf *local_btf,
3002				 const struct btf *targ_btf,
3003				 struct hashmap *cand_cache)
3004{
3005	const char *prog_name = bpf_program__title(prog, false);
3006	struct bpf_core_spec local_spec, cand_spec, targ_spec;
3007	const void *type_key = u32_as_hash_key(relo->type_id);
3008	const struct btf_type *local_type, *cand_type;
3009	const char *local_name, *cand_name;
3010	struct ids_vec *cand_ids;
3011	__u32 local_id, cand_id;
3012	const char *spec_str;
3013	int i, j, err;
3014
3015	local_id = relo->type_id;
3016	local_type = btf__type_by_id(local_btf, local_id);
3017	if (!local_type)
3018		return -EINVAL;
3019
3020	local_name = btf__name_by_offset(local_btf, local_type->name_off);
3021	if (str_is_empty(local_name))
3022		return -EINVAL;
3023
3024	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
3025	if (str_is_empty(spec_str))
3026		return -EINVAL;
3027
3028	err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
3029	if (err) {
3030		pr_warning("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
3031			   prog_name, relo_idx, local_id, local_name, spec_str,
3032			   err);
3033		return -EINVAL;
3034	}
3035
3036	pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
3037	bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
3038	libbpf_print(LIBBPF_DEBUG, "\n");
3039
3040	if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
3041		cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
3042		if (IS_ERR(cand_ids)) {
3043			pr_warning("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld\n",
3044				   prog_name, relo_idx, local_id, local_name,
3045				   PTR_ERR(cand_ids));
3046			return PTR_ERR(cand_ids);
3047		}
3048		err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
3049		if (err) {
3050			bpf_core_free_cands(cand_ids);
3051			return err;
3052		}
3053	}
3054
3055	for (i = 0, j = 0; i < cand_ids->len; i++) {
3056		cand_id = cand_ids->data[i];
3057		cand_type = btf__type_by_id(targ_btf, cand_id);
3058		cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
3059
3060		err = bpf_core_spec_match(&local_spec, targ_btf,
3061					  cand_id, &cand_spec);
3062		pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
3063			 prog_name, relo_idx, i, cand_name);
3064		bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
3065		libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
3066		if (err < 0) {
3067			pr_warning("prog '%s': relo #%d: matching error: %d\n",
3068				   prog_name, relo_idx, err);
3069			return err;
3070		}
3071		if (err == 0)
3072			continue;
3073
3074		if (j == 0) {
3075			targ_spec = cand_spec;
3076		} else if (cand_spec.offset != targ_spec.offset) {
3077			/* if there are many candidates, they should all
3078			 * resolve to the same offset
3079			 */
3080			pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
3081				   prog_name, relo_idx, cand_spec.offset,
3082				   targ_spec.offset);
3083			return -EINVAL;
3084		}
3085
3086		cand_ids->data[j++] = cand_spec.spec[0].type_id;
3087	}
3088
3089	cand_ids->len = j;
3090	if (cand_ids->len == 0) {
3091		pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
3092			   prog_name, relo_idx, local_id, local_name, spec_str);
3093		return -ESRCH;
3094	}
3095
3096	err = bpf_core_reloc_insn(prog, relo->insn_off,
3097				  local_spec.offset, targ_spec.offset);
3098	if (err) {
3099		pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
3100			   prog_name, relo_idx, relo->insn_off, err);
3101		return -EINVAL;
3102	}
3103
3104	return 0;
3105}
3106
3107static int
3108bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
3109{
3110	const struct btf_ext_info_sec *sec;
3111	const struct bpf_offset_reloc *rec;
3112	const struct btf_ext_info *seg;
3113	struct hashmap_entry *entry;
3114	struct hashmap *cand_cache = NULL;
3115	struct bpf_program *prog;
3116	struct btf *targ_btf;
3117	const char *sec_name;
3118	int i, err = 0;
3119
3120	if (targ_btf_path)
3121		targ_btf = btf__parse_elf(targ_btf_path, NULL);
3122	else
3123		targ_btf = bpf_core_find_kernel_btf();
3124	if (IS_ERR(targ_btf)) {
3125		pr_warning("failed to get target BTF: %ld\n",
3126			   PTR_ERR(targ_btf));
3127		return PTR_ERR(targ_btf);
3128	}
3129
3130	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
3131	if (IS_ERR(cand_cache)) {
3132		err = PTR_ERR(cand_cache);
3133		goto out;
3134	}
3135
3136	seg = &obj->btf_ext->offset_reloc_info;
3137	for_each_btf_ext_sec(seg, sec) {
3138		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3139		if (str_is_empty(sec_name)) {
3140			err = -EINVAL;
3141			goto out;
3142		}
3143		prog = bpf_object__find_program_by_title(obj, sec_name);
3144		if (!prog) {
3145			pr_warning("failed to find program '%s' for CO-RE offset relocation\n",
3146				   sec_name);
3147			err = -EINVAL;
3148			goto out;
3149		}
3150
3151		pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
3152			 sec_name, sec->num_info);
3153
3154		for_each_btf_ext_rec(seg, sec, i, rec) {
3155			err = bpf_core_reloc_offset(prog, rec, i, obj->btf,
3156						    targ_btf, cand_cache);
3157			if (err) {
3158				pr_warning("prog '%s': relo #%d: failed to relocate: %d\n",
3159					   sec_name, i, err);
3160				goto out;
3161			}
3162		}
3163	}
3164
3165out:
3166	btf__free(targ_btf);
3167	if (!IS_ERR_OR_NULL(cand_cache)) {
3168		hashmap__for_each_entry(cand_cache, entry, i) {
3169			bpf_core_free_cands(entry->value);
3170		}
3171		hashmap__free(cand_cache);
3172	}
3173	return err;
3174}
3175
3176static int
3177bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
3178{
3179	int err = 0;
3180
3181	if (obj->btf_ext->offset_reloc_info.len)
3182		err = bpf_core_reloc_offsets(obj, targ_btf_path);
3183
3184	return err;
3185}
3186
3187static int
3188bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
3189			struct reloc_desc *relo)
3190{
3191	struct bpf_insn *insn, *new_insn;
3192	struct bpf_program *text;
3193	size_t new_cnt;
3194	int err;
3195
3196	if (relo->type != RELO_CALL)
3197		return -LIBBPF_ERRNO__RELOC;
3198
3199	if (prog->idx == obj->efile.text_shndx) {
3200		pr_warning("relo in .text insn %d into off %d\n",
3201			   relo->insn_idx, relo->text_off);
3202		return -LIBBPF_ERRNO__RELOC;
3203	}
3204
3205	if (prog->main_prog_cnt == 0) {
3206		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
3207		if (!text) {
3208			pr_warning("no .text section found, yet relo into text exists\n");
3209			return -LIBBPF_ERRNO__RELOC;
3210		}
3211		new_cnt = prog->insns_cnt + text->insns_cnt;
3212		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
3213		if (!new_insn) {
3214			pr_warning("oom in prog realloc\n");
3215			return -ENOMEM;
3216		}
3217
3218		if (obj->btf_ext) {
3219			err = bpf_program_reloc_btf_ext(prog, obj,
3220							text->section_name,
3221							prog->insns_cnt);
3222			if (err)
3223				return err;
3224		}
3225
3226		memcpy(new_insn + prog->insns_cnt, text->insns,
3227		       text->insns_cnt * sizeof(*insn));
3228		prog->insns = new_insn;
3229		prog->main_prog_cnt = prog->insns_cnt;
3230		prog->insns_cnt = new_cnt;
3231		pr_debug("added %zd insn from %s to prog %s\n",
3232			 text->insns_cnt, text->section_name,
3233			 prog->section_name);
3234	}
3235	insn = &prog->insns[relo->insn_idx];
3236	insn->imm += prog->main_prog_cnt - relo->insn_idx;
3237	return 0;
3238}
3239
3240static int
3241bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
3242{
3243	int i, err;
3244
3245	if (!prog)
3246		return 0;
3247
3248	if (obj->btf_ext) {
3249		err = bpf_program_reloc_btf_ext(prog, obj,
3250						prog->section_name, 0);
3251		if (err)
3252			return err;
3253	}
3254
3255	if (!prog->reloc_desc)
3256		return 0;
3257
3258	for (i = 0; i < prog->nr_reloc; i++) {
3259		if (prog->reloc_desc[i].type == RELO_LD64 ||
3260		    prog->reloc_desc[i].type == RELO_DATA) {
3261			bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
3262			struct bpf_insn *insns = prog->insns;
3263			int insn_idx, map_idx;
3264
3265			insn_idx = prog->reloc_desc[i].insn_idx;
3266			map_idx = prog->reloc_desc[i].map_idx;
3267
3268			if (insn_idx + 1 >= (int)prog->insns_cnt) {
3269				pr_warning("relocation out of range: '%s'\n",
3270					   prog->section_name);
3271				return -LIBBPF_ERRNO__RELOC;
3272			}
3273
3274			if (!relo_data) {
3275				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
3276			} else {
3277				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
3278				insns[insn_idx + 1].imm = insns[insn_idx].imm;
3279			}
3280			insns[insn_idx].imm = obj->maps[map_idx].fd;
3281		} else if (prog->reloc_desc[i].type == RELO_CALL) {
3282			err = bpf_program__reloc_text(prog, obj,
3283						      &prog->reloc_desc[i]);
3284			if (err)
3285				return err;
3286		}
3287	}
3288
3289	zfree(&prog->reloc_desc);
3290	prog->nr_reloc = 0;
3291	return 0;
3292}
3293
3294static int
3295bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
3296{
3297	struct bpf_program *prog;
3298	size_t i;
3299	int err;
3300
3301	if (obj->btf_ext) {
3302		err = bpf_object__relocate_core(obj, targ_btf_path);
3303		if (err) {
3304			pr_warning("failed to perform CO-RE relocations: %d\n",
3305				   err);
3306			return err;
3307		}
3308	}
3309	for (i = 0; i < obj->nr_programs; i++) {
3310		prog = &obj->programs[i];
3311
3312		err = bpf_program__relocate(prog, obj);
3313		if (err) {
3314			pr_warning("failed to relocate '%s'\n",
3315				   prog->section_name);
3316			return err;
3317		}
3318	}
3319	return 0;
3320}
3321
3322static int bpf_object__collect_reloc(struct bpf_object *obj)
3323{
3324	int i, err;
3325
3326	if (!obj_elf_valid(obj)) {
3327		pr_warning("Internal error: elf object is closed\n");
3328		return -LIBBPF_ERRNO__INTERNAL;
3329	}
3330
3331	for (i = 0; i < obj->efile.nr_reloc; i++) {
3332		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
3333		Elf_Data *data = obj->efile.reloc[i].data;
3334		int idx = shdr->sh_info;
3335		struct bpf_program *prog;
3336
3337		if (shdr->sh_type != SHT_REL) {
3338			pr_warning("internal error at %d\n", __LINE__);
3339			return -LIBBPF_ERRNO__INTERNAL;
3340		}
3341
3342		prog = bpf_object__find_prog_by_idx(obj, idx);
3343		if (!prog) {
3344			pr_warning("relocation failed: no section(%d)\n", idx);
3345			return -LIBBPF_ERRNO__RELOC;
3346		}
3347
3348		err = bpf_program__collect_reloc(prog, shdr, data, obj);
3349		if (err)
3350			return err;
3351	}
3352	return 0;
3353}
3354
3355static int
3356load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
3357	     char *license, __u32 kern_version, int *pfd)
3358{
3359	struct bpf_load_program_attr load_attr;
3360	char *cp, errmsg[STRERR_BUFSIZE];
3361	int log_buf_size = BPF_LOG_BUF_SIZE;
3362	char *log_buf;
3363	int btf_fd, ret;
3364
3365	if (!insns || !insns_cnt)
3366		return -EINVAL;
3367
3368	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
3369	load_attr.prog_type = prog->type;
3370	load_attr.expected_attach_type = prog->expected_attach_type;
3371	if (prog->caps->name)
3372		load_attr.name = prog->name;
3373	load_attr.insns = insns;
3374	load_attr.insns_cnt = insns_cnt;
3375	load_attr.license = license;
3376	load_attr.kern_version = kern_version;
3377	load_attr.prog_ifindex = prog->prog_ifindex;
3378	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
3379	if (prog->obj->btf_ext)
3380		btf_fd = bpf_object__btf_fd(prog->obj);
3381	else
3382		btf_fd = -1;
3383	load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
3384	load_attr.func_info = prog->func_info;
3385	load_attr.func_info_rec_size = prog->func_info_rec_size;
3386	load_attr.func_info_cnt = prog->func_info_cnt;
3387	load_attr.line_info = prog->line_info;
3388	load_attr.line_info_rec_size = prog->line_info_rec_size;
3389	load_attr.line_info_cnt = prog->line_info_cnt;
3390	load_attr.log_level = prog->log_level;
3391	load_attr.prog_flags = prog->prog_flags;
3392
3393retry_load:
3394	log_buf = malloc(log_buf_size);
3395	if (!log_buf)
3396		pr_warning("Failed to allocate log buffer for BPF loader, continuing without log\n");
3397
3398	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
3399
3400	if (ret >= 0) {
3401		if (load_attr.log_level)
3402			pr_debug("verifier log:\n%s", log_buf);
3403		*pfd = ret;
3404		ret = 0;
3405		goto out;
3406	}
3407
3408	if (errno == ENOSPC) {
3409		log_buf_size <<= 1;
3410		free(log_buf);
3411		goto retry_load;
3412	}
3413	ret = -LIBBPF_ERRNO__LOAD;
3414	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3415	pr_warning("load bpf program failed: %s\n", cp);
3416
3417	if (log_buf && log_buf[0] != '\0') {
3418		ret = -LIBBPF_ERRNO__VERIFY;
3419		pr_warning("-- BEGIN DUMP LOG ---\n");
3420		pr_warning("\n%s\n", log_buf);
3421		pr_warning("-- END LOG --\n");
3422	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
3423		pr_warning("Program too large (%zu insns), at most %d insns\n",
3424			   load_attr.insns_cnt, BPF_MAXINSNS);
3425		ret = -LIBBPF_ERRNO__PROG2BIG;
3426	} else {
3427		/* Wrong program type? */
3428		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
3429			int fd;
3430
3431			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
3432			load_attr.expected_attach_type = 0;
3433			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
3434			if (fd >= 0) {
3435				close(fd);
3436				ret = -LIBBPF_ERRNO__PROGTYPE;
3437				goto out;
3438			}
3439		}
3440
3441		if (log_buf)
3442			ret = -LIBBPF_ERRNO__KVER;
3443	}
3444
3445out:
3446	free(log_buf);
3447	return ret;
3448}
3449
3450int
3451bpf_program__load(struct bpf_program *prog,
3452		  char *license, __u32 kern_version)
3453{
3454	int err = 0, fd, i;
3455
3456	if (prog->instances.nr < 0 || !prog->instances.fds) {
3457		if (prog->preprocessor) {
3458			pr_warning("Internal error: can't load program '%s'\n",
3459				   prog->section_name);
3460			return -LIBBPF_ERRNO__INTERNAL;
3461		}
3462
3463		prog->instances.fds = malloc(sizeof(int));
3464		if (!prog->instances.fds) {
3465			pr_warning("Not enough memory for BPF fds\n");
3466			return -ENOMEM;
3467		}
3468		prog->instances.nr = 1;
3469		prog->instances.fds[0] = -1;
3470	}
3471
3472	if (!prog->preprocessor) {
3473		if (prog->instances.nr != 1) {
3474			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
3475				   prog->section_name, prog->instances.nr);
3476		}
3477		err = load_program(prog, prog->insns, prog->insns_cnt,
3478				   license, kern_version, &fd);
3479		if (!err)
3480			prog->instances.fds[0] = fd;
3481		goto out;
3482	}
3483
3484	for (i = 0; i < prog->instances.nr; i++) {
3485		struct bpf_prog_prep_result result;
3486		bpf_program_prep_t preprocessor = prog->preprocessor;
3487
3488		memset(&result, 0, sizeof(result));
3489		err = preprocessor(prog, i, prog->insns,
3490				   prog->insns_cnt, &result);
3491		if (err) {
3492			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
3493				   i, prog->section_name);
3494			goto out;
3495		}
3496
3497		if (!result.new_insn_ptr || !result.new_insn_cnt) {
3498			pr_debug("Skip loading the %dth instance of program '%s'\n",
3499				 i, prog->section_name);
3500			prog->instances.fds[i] = -1;
3501			if (result.pfd)
3502				*result.pfd = -1;
3503			continue;
3504		}
3505
3506		err = load_program(prog, result.new_insn_ptr,
3507				   result.new_insn_cnt,
3508				   license, kern_version, &fd);
3509
3510		if (err) {
3511			pr_warning("Loading the %dth instance of program '%s' failed\n",
3512					i, prog->section_name);
3513			goto out;
3514		}
3515
3516		if (result.pfd)
3517			*result.pfd = fd;
3518		prog->instances.fds[i] = fd;
3519	}
3520out:
3521	if (err)
3522		pr_warning("failed to load program '%s'\n",
3523			   prog->section_name);
3524	zfree(&prog->insns);
3525	prog->insns_cnt = 0;
3526	return err;
3527}
3528
3529static bool bpf_program__is_function_storage(const struct bpf_program *prog,
3530					     const struct bpf_object *obj)
3531{
3532	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
3533}
3534
3535static int
3536bpf_object__load_progs(struct bpf_object *obj, int log_level)
3537{
3538	size_t i;
3539	int err;
3540
3541	for (i = 0; i < obj->nr_programs; i++) {
3542		if (bpf_program__is_function_storage(&obj->programs[i], obj))
3543			continue;
3544		obj->programs[i].log_level |= log_level;
3545		err = bpf_program__load(&obj->programs[i],
3546					obj->license,
3547					obj->kern_version);
3548		if (err)
3549			return err;
3550	}
3551	return 0;
3552}
3553
3554static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
3555{
3556	switch (type) {
3557	case BPF_PROG_TYPE_SOCKET_FILTER:
3558	case BPF_PROG_TYPE_SCHED_CLS:
3559	case BPF_PROG_TYPE_SCHED_ACT:
3560	case BPF_PROG_TYPE_XDP:
3561	case BPF_PROG_TYPE_CGROUP_SKB:
3562	case BPF_PROG_TYPE_CGROUP_SOCK:
3563	case BPF_PROG_TYPE_LWT_IN:
3564	case BPF_PROG_TYPE_LWT_OUT:
3565	case BPF_PROG_TYPE_LWT_XMIT:
3566	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
3567	case BPF_PROG_TYPE_SOCK_OPS:
3568	case BPF_PROG_TYPE_SK_SKB:
3569	case BPF_PROG_TYPE_CGROUP_DEVICE:
3570	case BPF_PROG_TYPE_SK_MSG:
3571	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3572	case BPF_PROG_TYPE_LIRC_MODE2:
3573	case BPF_PROG_TYPE_SK_REUSEPORT:
3574	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3575	case BPF_PROG_TYPE_UNSPEC:
3576	case BPF_PROG_TYPE_TRACEPOINT:
3577	case BPF_PROG_TYPE_RAW_TRACEPOINT:
3578	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3579	case BPF_PROG_TYPE_PERF_EVENT:
3580	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3581	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3582		return false;
3583	case BPF_PROG_TYPE_KPROBE:
3584	default:
3585		return true;
3586	}
3587}
3588
3589static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
3590{
3591	if (needs_kver && obj->kern_version == 0) {
3592		pr_warning("%s doesn't provide kernel version\n",
3593			   obj->path);
3594		return -LIBBPF_ERRNO__KVERSION;
3595	}
3596	return 0;
3597}
3598
3599static struct bpf_object *
3600__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
3601		   bool needs_kver, int flags)
3602{
3603	struct bpf_object *obj;
3604	int err;
3605
3606	if (elf_version(EV_CURRENT) == EV_NONE) {
3607		pr_warning("failed to init libelf for %s\n", path);
3608		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
3609	}
3610
3611	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
3612	if (IS_ERR(obj))
3613		return obj;
3614
3615	CHECK_ERR(bpf_object__elf_init(obj), err, out);
3616	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
3617	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
3618	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
3619	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
3620	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
3621
3622	bpf_object__elf_finish(obj);
3623	return obj;
3624out:
3625	bpf_object__close(obj);
3626	return ERR_PTR(err);
3627}
3628
3629struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
3630					    int flags)
3631{
3632	/* param validation */
3633	if (!attr->file)
3634		return NULL;
3635
3636	pr_debug("loading %s\n", attr->file);
3637
3638	return __bpf_object__open(attr->file, NULL, 0,
3639				  bpf_prog_type__needs_kver(attr->prog_type),
3640				  flags);
3641}
3642
3643struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
3644{
3645	return __bpf_object__open_xattr(attr, 0);
3646}
3647
3648struct bpf_object *bpf_object__open(const char *path)
3649{
3650	struct bpf_object_open_attr attr = {
3651		.file		= path,
3652		.prog_type	= BPF_PROG_TYPE_UNSPEC,
3653	};
3654
3655	return bpf_object__open_xattr(&attr);
3656}
3657
3658struct bpf_object *bpf_object__open_buffer(void *obj_buf,
3659					   size_t obj_buf_sz,
3660					   const char *name)
3661{
3662	char tmp_name[64];
3663
3664	/* param validation */
3665	if (!obj_buf || obj_buf_sz <= 0)
3666		return NULL;
3667
3668	if (!name) {
3669		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
3670			 (unsigned long)obj_buf,
3671			 (unsigned long)obj_buf_sz);
3672		name = tmp_name;
3673	}
3674	pr_debug("loading object '%s' from buffer\n", name);
3675
3676	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
3677}
3678
3679int bpf_object__unload(struct bpf_object *obj)
3680{
3681	size_t i;
3682
3683	if (!obj)
3684		return -EINVAL;
3685
3686	for (i = 0; i < obj->nr_maps; i++)
3687		zclose(obj->maps[i].fd);
3688
3689	for (i = 0; i < obj->nr_programs; i++)
3690		bpf_program__unload(&obj->programs[i]);
3691
3692	return 0;
3693}
3694
3695int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
3696{
3697	struct bpf_object *obj;
3698	int err;
3699
3700	if (!attr)
3701		return -EINVAL;
3702	obj = attr->obj;
3703	if (!obj)
3704		return -EINVAL;
3705
3706	if (obj->loaded) {
3707		pr_warning("object should not be loaded twice\n");
3708		return -EINVAL;
3709	}
3710
3711	obj->loaded = true;
3712
3713	CHECK_ERR(bpf_object__create_maps(obj), err, out);
3714	CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
3715	CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
3716
3717	return 0;
3718out:
3719	bpf_object__unload(obj);
3720	pr_warning("failed to load object '%s'\n", obj->path);
3721	return err;
3722}
3723
3724int bpf_object__load(struct bpf_object *obj)
3725{
3726	struct bpf_object_load_attr attr = {
3727		.obj = obj,
3728	};
3729
3730	return bpf_object__load_xattr(&attr);
3731}
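
/* Minimal end-to-end sketch (the object file name is an assumption):
 *
 *   struct bpf_object *obj = bpf_object__open("prog.o");
 *
 *   if (libbpf_get_error(obj) || bpf_object__load(obj))
 *       ... handle error ...
 *   ... use maps/programs, then bpf_object__close(obj) ...
 */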
3732
3733static int check_path(const char *path)
3734{
3735	char *cp, errmsg[STRERR_BUFSIZE];
3736	struct statfs st_fs;
3737	char *dname, *dir;
3738	int err = 0;
3739
3740	if (path == NULL)
3741		return -EINVAL;
3742
3743	dname = strdup(path);
3744	if (dname == NULL)
3745		return -ENOMEM;
3746
3747	dir = dirname(dname);
3748	if (statfs(dir, &st_fs)) {
3749		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3750		pr_warning("failed to statfs %s: %s\n", dir, cp);
3751		err = -errno;
3752	}
3753	free(dname);
3754
3755	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
3756		pr_warning("specified path %s is not on BPF FS\n", path);
3757		err = -EINVAL;
3758	}
3759
3760	return err;
3761}
3762
3763int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
3764			      int instance)
3765{
3766	char *cp, errmsg[STRERR_BUFSIZE];
3767	int err;
3768
3769	err = check_path(path);
3770	if (err)
3771		return err;
3772
3773	if (prog == NULL) {
3774		pr_warning("invalid program pointer\n");
3775		return -EINVAL;
3776	}
3777
3778	if (instance < 0 || instance >= prog->instances.nr) {
3779		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
3780			   instance, prog->section_name, prog->instances.nr);
3781		return -EINVAL;
3782	}
3783
3784	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
3785		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3786		pr_warning("failed to pin program: %s\n", cp);
3787		return -errno;
3788	}
3789	pr_debug("pinned program '%s'\n", path);
3790
3791	return 0;
3792}
3793
3794int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
3795				int instance)
3796{
3797	int err;
3798
3799	err = check_path(path);
3800	if (err)
3801		return err;
3802
3803	if (prog == NULL) {
3804		pr_warning("invalid program pointer\n");
3805		return -EINVAL;
3806	}
3807
3808	if (instance < 0 || instance >= prog->instances.nr) {
3809		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
3810			   instance, prog->section_name, prog->instances.nr);
3811		return -EINVAL;
3812	}
3813
3814	err = unlink(path);
3815	if (err != 0)
3816		return -errno;
3817	pr_debug("unpinned program '%s'\n", path);
3818
3819	return 0;
3820}
3821
3822static int make_dir(const char *path)
3823{
3824	char *cp, errmsg[STRERR_BUFSIZE];
3825	int err = 0;
3826
3827	if (mkdir(path, 0700) && errno != EEXIST)
3828		err = -errno;
3829
3830	if (err) {
3831		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
3832		pr_warning("failed to mkdir %s: %s\n", path, cp);
3833	}
3834	return err;
3835}
3836
3837int bpf_program__pin(struct bpf_program *prog, const char *path)
3838{
3839	int i, err;
3840
3841	err = check_path(path);
3842	if (err)
3843		return err;
3844
3845	if (prog == NULL) {
3846		pr_warning("invalid program pointer\n");
3847		return -EINVAL;
3848	}
3849
3850	if (prog->instances.nr <= 0) {
3851		pr_warning("no instances of prog %s to pin\n",
3852			   prog->section_name);
3853		return -EINVAL;
3854	}
3855
3856	if (prog->instances.nr == 1) {
3857		/* don't create subdirs when pinning single instance */
3858		return bpf_program__pin_instance(prog, path, 0);
3859	}
3860
3861	err = make_dir(path);
3862	if (err)
3863		return err;
3864
3865	for (i = 0; i < prog->instances.nr; i++) {
3866		char buf[PATH_MAX];
3867		int len;
3868
3869		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3870		if (len < 0) {
3871			err = -EINVAL;
3872			goto err_unpin;
3873		} else if (len >= PATH_MAX) {
3874			err = -ENAMETOOLONG;
3875			goto err_unpin;
3876		}
3877
3878		err = bpf_program__pin_instance(prog, buf, i);
3879		if (err)
3880			goto err_unpin;
3881	}
3882
3883	return 0;
3884
3885err_unpin:
3886	for (i = i - 1; i >= 0; i--) {
3887		char buf[PATH_MAX];
3888		int len;
3889
3890		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3891		if (len < 0)
3892			continue;
3893		else if (len >= PATH_MAX)
3894			continue;
3895
3896		bpf_program__unpin_instance(prog, buf, i);
3897	}
3898
3899	rmdir(path);
3900
3901	return err;
3902}
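/*
 * Sketch of the resulting pin layout: a single-instance program is
 * pinned at "path" itself, otherwise instance i is pinned at "path/i".
 * The BPF FS path below is an arbitrary example:
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/myprog");
 */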
3903
3904int bpf_program__unpin(struct bpf_program *prog, const char *path)
3905{
3906	int i, err;
3907
3908	err = check_path(path);
3909	if (err)
3910		return err;
3911
3912	if (prog == NULL) {
3913		pr_warning("invalid program pointer\n");
3914		return -EINVAL;
3915	}
3916
3917	if (prog->instances.nr <= 0) {
3918		pr_warning("no instances of prog %s to unpin\n",
3919			   prog->section_name);
3920		return -EINVAL;
3921	}
3922
3923	if (prog->instances.nr == 1) {
3924		/* don't look into subdirs when unpinning a single instance */
3925		return bpf_program__unpin_instance(prog, path, 0);
3926	}
3927
3928	for (i = 0; i < prog->instances.nr; i++) {
3929		char buf[PATH_MAX];
3930		int len;
3931
3932		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3933		if (len < 0)
3934			return -EINVAL;
3935		else if (len >= PATH_MAX)
3936			return -ENAMETOOLONG;
3937
3938		err = bpf_program__unpin_instance(prog, buf, i);
3939		if (err)
3940			return err;
3941	}
3942
3943	err = rmdir(path);
3944	if (err)
3945		return -errno;
3946
3947	return 0;
3948}
3949
3950int bpf_map__pin(struct bpf_map *map, const char *path)
3951{
3952	char *cp, errmsg[STRERR_BUFSIZE];
3953	int err;
3954
3955	err = check_path(path);
3956	if (err)
3957		return err;
3958
3959	if (map == NULL) {
3960		pr_warning("invalid map pointer\n");
3961		return -EINVAL;
3962	}
3963
3964	if (bpf_obj_pin(map->fd, path)) {
3965		cp = libbpf_strerror_r(err = errno, errmsg, sizeof(errmsg));
3966		pr_warning("failed to pin map: %s\n", cp);
3967		return -err;
3968	}
3969
3970	pr_debug("pinned map '%s'\n", path);
3971
3972	return 0;
3973}
3974
3975int bpf_map__unpin(struct bpf_map *map, const char *path)
3976{
3977	int err;
3978
3979	err = check_path(path);
3980	if (err)
3981		return err;
3982
3983	if (map == NULL) {
3984		pr_warning("invalid map pointer\n");
3985		return -EINVAL;
3986	}
3987
3988	err = unlink(path);
3989	if (err != 0)
3990		return -errno;
3991	pr_debug("unpinned map '%s'\n", path);
3992
3993	return 0;
3994}
3995
3996int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
3997{
3998	struct bpf_map *map;
3999	int err;
4000
4001	if (!obj)
4002		return -ENOENT;
4003
4004	if (!obj->loaded) {
4005		pr_warning("object not yet loaded; load it first\n");
4006		return -ENOENT;
4007	}
4008
4009	err = make_dir(path);
4010	if (err)
4011		return err;
4012
4013	bpf_object__for_each_map(map, obj) {
4014		char buf[PATH_MAX];
4015		int len;
4016
4017		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4018			       bpf_map__name(map));
4019		if (len < 0) {
4020			err = -EINVAL;
4021			goto err_unpin_maps;
4022		} else if (len >= PATH_MAX) {
4023			err = -ENAMETOOLONG;
4024			goto err_unpin_maps;
4025		}
4026
4027		err = bpf_map__pin(map, buf);
4028		if (err)
4029			goto err_unpin_maps;
4030	}
4031
4032	return 0;
4033
4034err_unpin_maps:
4035	while ((map = bpf_map__prev(map, obj))) {
4036		char buf[PATH_MAX];
4037		int len;
4038
4039		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4040			       bpf_map__name(map));
4041		if (len < 0)
4042			continue;
4043		else if (len >= PATH_MAX)
4044			continue;
4045
4046		bpf_map__unpin(map, buf);
4047	}
4048
4049	return err;
4050}
4051
4052int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
4053{
4054	struct bpf_map *map;
4055	int err;
4056
4057	if (!obj)
4058		return -ENOENT;
4059
4060	bpf_object__for_each_map(map, obj) {
4061		char buf[PATH_MAX];
4062		int len;
4063
4064		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4065			       bpf_map__name(map));
4066		if (len < 0)
4067			return -EINVAL;
4068		else if (len >= PATH_MAX)
4069			return -ENAMETOOLONG;
4070
4071		err = bpf_map__unpin(map, buf);
4072		if (err)
4073			return err;
4074	}
4075
4076	return 0;
4077}
4078
4079int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
4080{
4081	struct bpf_program *prog;
4082	int err;
4083
4084	if (!obj)
4085		return -ENOENT;
4086
4087	if (!obj->loaded) {
4088		pr_warning("object not yet loaded; load it first\n");
4089		return -ENOENT;
4090	}
4091
4092	err = make_dir(path);
4093	if (err)
4094		return err;
4095
4096	bpf_object__for_each_program(prog, obj) {
4097		char buf[PATH_MAX];
4098		int len;
4099
4100		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4101			       prog->pin_name);
4102		if (len < 0) {
4103			err = -EINVAL;
4104			goto err_unpin_programs;
4105		} else if (len >= PATH_MAX) {
4106			err = -ENAMETOOLONG;
4107			goto err_unpin_programs;
4108		}
4109
4110		err = bpf_program__pin(prog, buf);
4111		if (err)
4112			goto err_unpin_programs;
4113	}
4114
4115	return 0;
4116
4117err_unpin_programs:
4118	while ((prog = bpf_program__prev(prog, obj))) {
4119		char buf[PATH_MAX];
4120		int len;
4121
4122		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4123			       prog->pin_name);
4124		if (len < 0)
4125			continue;
4126		else if (len >= PATH_MAX)
4127			continue;
4128
4129		bpf_program__unpin(prog, buf);
4130	}
4131
4132	return err;
4133}
4134
4135int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
4136{
4137	struct bpf_program *prog;
4138	int err;
4139
4140	if (!obj)
4141		return -ENOENT;
4142
4143	bpf_object__for_each_program(prog, obj) {
4144		char buf[PATH_MAX];
4145		int len;
4146
4147		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4148			       prog->pin_name);
4149		if (len < 0)
4150			return -EINVAL;
4151		else if (len >= PATH_MAX)
4152			return -ENAMETOOLONG;
4153
4154		err = bpf_program__unpin(prog, buf);
4155		if (err)
4156			return err;
4157	}
4158
4159	return 0;
4160}
4161
4162int bpf_object__pin(struct bpf_object *obj, const char *path)
4163{
4164	int err;
4165
4166	err = bpf_object__pin_maps(obj, path);
4167	if (err)
4168		return err;
4169
4170	err = bpf_object__pin_programs(obj, path);
4171	if (err) {
4172		bpf_object__unpin_maps(obj, path);
4173		return err;
4174	}
4175
4176	return 0;
4177}
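/*
 * Sketch: pinning a whole loaded object; check_path() rejects any path
 * that is not on a BPF FS mount, and "/sys/fs/bpf/myobj" is illustrative:
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/myobj");
 *	if (err)
 *		return err;
 */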
4178
4179void bpf_object__close(struct bpf_object *obj)
4180{
4181	size_t i;
4182
4183	if (!obj)
4184		return;
4185
4186	if (obj->clear_priv)
4187		obj->clear_priv(obj, obj->priv);
4188
4189	bpf_object__elf_finish(obj);
4190	bpf_object__unload(obj);
4191	btf__free(obj->btf);
4192	btf_ext__free(obj->btf_ext);
4193
4194	for (i = 0; i < obj->nr_maps; i++) {
4195		zfree(&obj->maps[i].name);
4196		if (obj->maps[i].clear_priv)
4197			obj->maps[i].clear_priv(&obj->maps[i],
4198						obj->maps[i].priv);
4199		obj->maps[i].priv = NULL;
4200		obj->maps[i].clear_priv = NULL;
4201	}
4202
4203	zfree(&obj->sections.rodata);
4204	zfree(&obj->sections.data);
4205	zfree(&obj->maps);
4206	obj->nr_maps = 0;
4207
4208	if (obj->programs && obj->nr_programs) {
4209		for (i = 0; i < obj->nr_programs; i++)
4210			bpf_program__exit(&obj->programs[i]);
4211	}
4212	zfree(&obj->programs);
4213
4214	list_del(&obj->list);
4215	free(obj);
4216}
4217
4218struct bpf_object *
4219bpf_object__next(struct bpf_object *prev)
4220{
4221	struct bpf_object *next;
4222
4223	if (!prev)
4224		next = list_first_entry(&bpf_objects_list,
4225					struct bpf_object,
4226					list);
4227	else
4228		next = list_next_entry(prev, list);
4229
4230	/* An empty list is detected here, so there is no need to check on entry. */
4231	if (&next->list == &bpf_objects_list)
4232		return NULL;
4233
4234	return next;
4235}
4236
4237const char *bpf_object__name(const struct bpf_object *obj)
4238{
4239	return obj ? obj->path : ERR_PTR(-EINVAL);
4240}
4241
4242unsigned int bpf_object__kversion(const struct bpf_object *obj)
4243{
4244	return obj ? obj->kern_version : 0;
4245}
4246
4247struct btf *bpf_object__btf(const struct bpf_object *obj)
4248{
4249	return obj ? obj->btf : NULL;
4250}
4251
4252int bpf_object__btf_fd(const struct bpf_object *obj)
4253{
4254	return obj->btf ? btf__fd(obj->btf) : -1;
4255}
4256
4257int bpf_object__set_priv(struct bpf_object *obj, void *priv,
4258			 bpf_object_clear_priv_t clear_priv)
4259{
4260	if (obj->priv && obj->clear_priv)
4261		obj->clear_priv(obj, obj->priv);
4262
4263	obj->priv = priv;
4264	obj->clear_priv = clear_priv;
4265	return 0;
4266}
4267
4268void *bpf_object__priv(const struct bpf_object *obj)
4269{
4270	return obj ? obj->priv : ERR_PTR(-EINVAL);
4271}
4272
4273static struct bpf_program *
4274__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
4275		    bool forward)
4276{
4277	size_t nr_programs = obj->nr_programs;
4278	ssize_t idx;
4279
4280	if (!nr_programs)
4281		return NULL;
4282
4283	if (!p)
4284		/* Iter from the beginning */
4285		return forward ? &obj->programs[0] :
4286			&obj->programs[nr_programs - 1];
4287
4288	if (p->obj != obj) {
4289		pr_warning("error: program handler doesn't match object\n");
4290		return NULL;
4291	}
4292
4293	idx = (p - obj->programs) + (forward ? 1 : -1);
4294	if (idx >= obj->nr_programs || idx < 0)
4295		return NULL;
4296	return &obj->programs[idx];
4297}
4298
4299struct bpf_program *
4300bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
4301{
4302	struct bpf_program *prog = prev;
4303
4304	do {
4305		prog = __bpf_program__iter(prog, obj, true);
4306	} while (prog && bpf_program__is_function_storage(prog, obj));
4307
4308	return prog;
4309}
4310
4311struct bpf_program *
4312bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
4313{
4314	struct bpf_program *prog = next;
4315
4316	do {
4317		prog = __bpf_program__iter(prog, obj, false);
4318	} while (prog && bpf_program__is_function_storage(prog, obj));
4319
4320	return prog;
4321}
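/*
 * These iterators back the bpf_object__for_each_program() convenience
 * macro. An illustrative walk over all programs of an object:
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		bpf_program__set_ifindex(prog, ifindex);
 */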
4322
4323int bpf_program__set_priv(struct bpf_program *prog, void *priv,
4324			  bpf_program_clear_priv_t clear_priv)
4325{
4326	if (prog->priv && prog->clear_priv)
4327		prog->clear_priv(prog, prog->priv);
4328
4329	prog->priv = priv;
4330	prog->clear_priv = clear_priv;
4331	return 0;
4332}
4333
4334void *bpf_program__priv(const struct bpf_program *prog)
4335{
4336	return prog ? prog->priv : ERR_PTR(-EINVAL);
4337}
4338
4339void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
4340{
4341	prog->prog_ifindex = ifindex;
4342}
4343
4344const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
4345{
4346	const char *title;
4347
4348	title = prog->section_name;
4349	if (needs_copy) {
4350		title = strdup(title);
4351		if (!title) {
4352			pr_warning("failed to strdup program title\n");
4353			return ERR_PTR(-ENOMEM);
4354		}
4355	}
4356
4357	return title;
4358}
4359
4360int bpf_program__fd(const struct bpf_program *prog)
4361{
4362	return bpf_program__nth_fd(prog, 0);
4363}
4364
4365int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
4366			  bpf_program_prep_t prep)
4367{
4368	int *instances_fds;
4369
4370	if (nr_instances <= 0 || !prep)
4371		return -EINVAL;
4372
4373	if (prog->instances.nr > 0 || prog->instances.fds) {
4374		pr_warning("Can't set pre-processor after loading\n");
4375		return -EINVAL;
4376	}
4377
4378	instances_fds = malloc(sizeof(int) * nr_instances);
4379	if (!instances_fds) {
4380		pr_warning("failed to allocate memory for instance fds\n");
4381		return -ENOMEM;
4382	}
4383
4384	/* fill all fds with -1 */
4385	memset(instances_fds, -1, sizeof(int) * nr_instances);
4386
4387	prog->instances.nr = nr_instances;
4388	prog->instances.fds = instances_fds;
4389	prog->preprocessor = prep;
4390	return 0;
4391}
4392
4393int bpf_program__nth_fd(const struct bpf_program *prog, int n)
4394{
4395	int fd;
4396
4397	if (!prog)
4398		return -EINVAL;
4399
4400	if (n >= prog->instances.nr || n < 0) {
4401		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
4402			   n, prog->section_name, prog->instances.nr);
4403		return -EINVAL;
4404	}
4405
4406	fd = prog->instances.fds[n];
4407	if (fd < 0) {
4408		pr_warning("%dth instance of program '%s' is invalid\n",
4409			   n, prog->section_name);
4410		return -ENOENT;
4411	}
4412
4413	return fd;
4414}
4415
4416void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
4417{
4418	prog->type = type;
4419}
4420
4421static bool bpf_program__is_type(const struct bpf_program *prog,
4422				 enum bpf_prog_type type)
4423{
4424	return prog ? (prog->type == type) : false;
4425}
4426
4427#define BPF_PROG_TYPE_FNS(NAME, TYPE)				\
4428int bpf_program__set_##NAME(struct bpf_program *prog)		\
4429{								\
4430	if (!prog)						\
4431		return -EINVAL;					\
4432	bpf_program__set_type(prog, TYPE);			\
4433	return 0;						\
4434}								\
4435								\
4436bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
4437{								\
4438	return bpf_program__is_type(prog, TYPE);		\
4439}								\
4440
4441BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
4442BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
4443BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
4444BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
4445BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
4446BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
4447BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
4448BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
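/*
 * Each BPF_PROG_TYPE_FNS() invocation above expands to a setter and a
 * predicate, e.g. for "xdp" (attach_to_iface() stands in for caller
 * code):
 *
 *	err = bpf_program__set_xdp(prog);
 *	...
 *	if (bpf_program__is_xdp(prog))
 *		err = attach_to_iface(prog, ifindex);
 */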
4449
4450void bpf_program__set_expected_attach_type(struct bpf_program *prog,
4451					   enum bpf_attach_type type)
4452{
4453	prog->expected_attach_type = type;
4454}
4455
4456#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
4457	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
4458
4459/* Programs that can NOT be attached. */
4460#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
4461
4462/* Programs that can be attached. */
4463#define BPF_APROG_SEC(string, ptype, atype) \
4464	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
4465
4466/* Programs that must specify expected attach type at load time. */
4467#define BPF_EAPROG_SEC(string, ptype, eatype) \
4468	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
4469
4470/* Programs that can be attached but attach type can't be identified by section
4471 * name. Kept for backward compatibility.
4472 */
4473#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
4474
4475static const struct {
4476	const char *sec;
4477	size_t len;
4478	enum bpf_prog_type prog_type;
4479	enum bpf_attach_type expected_attach_type;
4480	int is_attachable;
4481	enum bpf_attach_type attach_type;
4482} section_names[] = {
4483	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
4484	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
4485	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
4486	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
4487	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
4488	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
4489	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
4490	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
4491	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
4492	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
4493	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
4494	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
4495	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
4496	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
4497						BPF_CGROUP_INET_INGRESS),
4498	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
4499						BPF_CGROUP_INET_EGRESS),
4500	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
4501	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
4502						BPF_CGROUP_INET_SOCK_CREATE),
4503	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
4504						BPF_CGROUP_INET4_POST_BIND),
4505	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
4506						BPF_CGROUP_INET6_POST_BIND),
4507	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
4508						BPF_CGROUP_DEVICE),
4509	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
4510						BPF_CGROUP_SOCK_OPS),
4511	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
4512						BPF_SK_SKB_STREAM_PARSER),
4513	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
4514						BPF_SK_SKB_STREAM_VERDICT),
4515	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
4516	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
4517						BPF_SK_MSG_VERDICT),
4518	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
4519						BPF_LIRC_MODE2),
4520	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
4521						BPF_FLOW_DISSECTOR),
4522	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4523						BPF_CGROUP_INET4_BIND),
4524	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4525						BPF_CGROUP_INET6_BIND),
4526	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4527						BPF_CGROUP_INET4_CONNECT),
4528	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4529						BPF_CGROUP_INET6_CONNECT),
4530	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4531						BPF_CGROUP_UDP4_SENDMSG),
4532	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4533						BPF_CGROUP_UDP6_SENDMSG),
4534	BPF_EAPROG_SEC("cgroup/recvmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4535						BPF_CGROUP_UDP4_RECVMSG),
4536	BPF_EAPROG_SEC("cgroup/recvmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4537						BPF_CGROUP_UDP6_RECVMSG),
4538	BPF_EAPROG_SEC("cgroup/sysctl",		BPF_PROG_TYPE_CGROUP_SYSCTL,
4539						BPF_CGROUP_SYSCTL),
4540	BPF_EAPROG_SEC("cgroup/getsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
4541						BPF_CGROUP_GETSOCKOPT),
4542	BPF_EAPROG_SEC("cgroup/setsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
4543						BPF_CGROUP_SETSOCKOPT),
4544};
4545
4546#undef BPF_PROG_SEC_IMPL
4547#undef BPF_PROG_SEC
4548#undef BPF_APROG_SEC
4549#undef BPF_EAPROG_SEC
4550#undef BPF_APROG_COMPAT
4551
4552#define MAX_TYPE_NAME_SIZE 32
4553
4554static char *libbpf_get_type_names(bool attach_type)
4555{
4556	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
4557	char *buf;
4558
4559	buf = malloc(len);
4560	if (!buf)
4561		return NULL;
4562
4563	buf[0] = '\0';
4564	/* Build a string buf listing all available names */
4565	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4566		if (attach_type && !section_names[i].is_attachable)
4567			continue;
4568
4569		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
4570			free(buf);
4571			return NULL;
4572		}
4573		strcat(buf, " ");
4574		strcat(buf, section_names[i].sec);
4575	}
4576
4577	return buf;
4578}
4579
4580int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
4581			     enum bpf_attach_type *expected_attach_type)
4582{
4583	char *type_names;
4584	int i;
4585
4586	if (!name)
4587		return -EINVAL;
4588
4589	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4590		if (strncmp(name, section_names[i].sec, section_names[i].len))
4591			continue;
4592		*prog_type = section_names[i].prog_type;
4593		*expected_attach_type = section_names[i].expected_attach_type;
4594		return 0;
4595	}
4596	pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
4597	type_names = libbpf_get_type_names(false);
4598	if (type_names != NULL) {
4599		pr_info("supported section(type) names are:%s\n", type_names);
4600		free(type_names);
4601	}
4602
4603	return -EINVAL;
4604}
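/*
 * Sketch: resolving types from an ELF section name via the
 * section_names[] table above ("xdp" maps to BPF_PROG_TYPE_XDP):
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	err = libbpf_prog_type_by_name("xdp", &prog_type, &attach_type);
 */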
4605
4606int libbpf_attach_type_by_name(const char *name,
4607			       enum bpf_attach_type *attach_type)
4608{
4609	char *type_names;
4610	int i;
4611
4612	if (!name)
4613		return -EINVAL;
4614
4615	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4616		if (strncmp(name, section_names[i].sec, section_names[i].len))
4617			continue;
4618		if (!section_names[i].is_attachable)
4619			return -EINVAL;
4620		*attach_type = section_names[i].attach_type;
4621		return 0;
4622	}
4623	pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
4624	type_names = libbpf_get_type_names(true);
4625	if (type_names != NULL) {
4626		pr_info("attachable section(type) names are:%s\n", type_names);
4627		free(type_names);
4628	}
4629
4630	return -EINVAL;
4631}
4632
4633static int
4634bpf_program__identify_section(struct bpf_program *prog,
4635			      enum bpf_prog_type *prog_type,
4636			      enum bpf_attach_type *expected_attach_type)
4637{
4638	return libbpf_prog_type_by_name(prog->section_name, prog_type,
4639					expected_attach_type);
4640}
4641
4642int bpf_map__fd(const struct bpf_map *map)
4643{
4644	return map ? map->fd : -EINVAL;
4645}
4646
4647const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
4648{
4649	return map ? &map->def : ERR_PTR(-EINVAL);
4650}
4651
4652const char *bpf_map__name(const struct bpf_map *map)
4653{
4654	return map ? map->name : NULL;
4655}
4656
4657__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
4658{
4659	return map ? map->btf_key_type_id : 0;
4660}
4661
4662__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
4663{
4664	return map ? map->btf_value_type_id : 0;
4665}
4666
4667int bpf_map__set_priv(struct bpf_map *map, void *priv,
4668		     bpf_map_clear_priv_t clear_priv)
4669{
4670	if (!map)
4671		return -EINVAL;
4672
4673	if (map->priv) {
4674		if (map->clear_priv)
4675			map->clear_priv(map, map->priv);
4676	}
4677
4678	map->priv = priv;
4679	map->clear_priv = clear_priv;
4680	return 0;
4681}
4682
4683void *bpf_map__priv(const struct bpf_map *map)
4684{
4685	return map ? map->priv : ERR_PTR(-EINVAL);
4686}
4687
4688bool bpf_map__is_offload_neutral(const struct bpf_map *map)
4689{
4690	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
4691}
4692
4693bool bpf_map__is_internal(const struct bpf_map *map)
4694{
4695	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
4696}
4697
4698void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
4699{
4700	map->map_ifindex = ifindex;
4701}
4702
4703int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
4704{
4705	if (!bpf_map_type__is_map_in_map(map->def.type)) {
4706		pr_warning("error: unsupported map type\n");
4707		return -EINVAL;
4708	}
4709	if (map->inner_map_fd != -1) {
4710		pr_warning("error: inner_map_fd already specified\n");
4711		return -EINVAL;
4712	}
4713	map->inner_map_fd = fd;
4714	return 0;
4715}
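/*
 * Sketch: supplying the inner map FD for a map-in-map before the object
 * is loaded; inner_fd is assumed to come from bpf_create_map() or a
 * pinned map:
 *
 *	err = bpf_map__set_inner_map_fd(outer_map, inner_fd);
 */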
4716
4717static struct bpf_map *
4718__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
4719{
4720	ssize_t idx;
4721	struct bpf_map *s, *e;
4722
4723	if (!obj || !obj->maps)
4724		return NULL;
4725
4726	s = obj->maps;
4727	e = obj->maps + obj->nr_maps;
4728
4729	if ((m < s) || (m >= e)) {
4730		pr_warning("error in %s: map handler doesn't belong to object\n",
4731			   __func__);
4732		return NULL;
4733	}
4734
4735	idx = (m - obj->maps) + i;
4736	if (idx >= obj->nr_maps || idx < 0)
4737		return NULL;
4738	return &obj->maps[idx];
4739}
4740
4741struct bpf_map *
4742bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
4743{
4744	if (prev == NULL)
4745		return obj->maps;
4746
4747	return __bpf_map__iter(prev, obj, 1);
4748}
4749
4750struct bpf_map *
4751bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
4752{
4753	if (next == NULL) {
4754		if (!obj->nr_maps)
4755			return NULL;
4756		return obj->maps + obj->nr_maps - 1;
4757	}
4758
4759	return __bpf_map__iter(next, obj, -1);
4760}
4761
4762struct bpf_map *
4763bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
4764{
4765	struct bpf_map *pos;
4766
4767	bpf_object__for_each_map(pos, obj) {
4768		if (pos->name && !strcmp(pos->name, name))
4769			return pos;
4770	}
4771	return NULL;
4772}
4773
4774int
4775bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
4776{
4777	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
4778}
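/*
 * Sketch: looking up a map FD by name, where "events" is an example of
 * a map name as declared in the BPF object:
 *
 *	int map_fd = bpf_object__find_map_fd_by_name(obj, "events");
 *
 *	if (map_fd < 0)
 *		return map_fd;
 */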
4779
4780struct bpf_map *
4781bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
4782{
4783	return ERR_PTR(-ENOTSUP);
4784}
4785
4786long libbpf_get_error(const void *ptr)
4787{
4788	return PTR_ERR_OR_ZERO(ptr);
4789}
4790
4791int bpf_prog_load(const char *file, enum bpf_prog_type type,
4792		  struct bpf_object **pobj, int *prog_fd)
4793{
4794	struct bpf_prog_load_attr attr;
4795
4796	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
4797	attr.file = file;
4798	attr.prog_type = type;
4799	attr.expected_attach_type = 0;
4800
4801	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
4802}
4803
4804int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
4805			struct bpf_object **pobj, int *prog_fd)
4806{
4807	struct bpf_object_open_attr open_attr = {};
4808	struct bpf_program *prog, *first_prog = NULL;
4809	enum bpf_attach_type expected_attach_type;
4810	enum bpf_prog_type prog_type;
4811	struct bpf_object *obj;
4812	struct bpf_map *map;
4813	int err;
4814
4815	if (!attr)
4816		return -EINVAL;
4817	if (!attr->file)
4818		return -EINVAL;
4819
4820	open_attr.file = attr->file;
4821	open_attr.prog_type = attr->prog_type;
4822
4823	obj = bpf_object__open_xattr(&open_attr);
4824	if (IS_ERR_OR_NULL(obj))
4825		return -ENOENT;
4826
4827	bpf_object__for_each_program(prog, obj) {
4828		/*
4829		 * If type is not specified, try to guess it based on
4830		 * section name.
4831		 */
4832		prog_type = attr->prog_type;
4833		prog->prog_ifindex = attr->ifindex;
4834		expected_attach_type = attr->expected_attach_type;
4835		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
4836			err = bpf_program__identify_section(prog, &prog_type,
4837							    &expected_attach_type);
4838			if (err < 0) {
4839				bpf_object__close(obj);
4840				return -EINVAL;
4841			}
4842		}
4843
4844		bpf_program__set_type(prog, prog_type);
4845		bpf_program__set_expected_attach_type(prog,
4846						      expected_attach_type);
4847
4848		prog->log_level = attr->log_level;
4849		prog->prog_flags = attr->prog_flags;
4850		if (!first_prog)
4851			first_prog = prog;
4852	}
4853
4854	bpf_object__for_each_map(map, obj) {
4855		if (!bpf_map__is_offload_neutral(map))
4856			map->map_ifindex = attr->ifindex;
4857	}
4858
4859	if (!first_prog) {
4860		pr_warning("object file doesn't contain bpf program\n");
4861		bpf_object__close(obj);
4862		return -ENOENT;
4863	}
4864
4865	err = bpf_object__load(obj);
4866	if (err) {
4867		bpf_object__close(obj);
4868		return -EINVAL;
4869	}
4870
4871	*pobj = obj;
4872	*prog_fd = bpf_program__fd(first_prog);
4873	return 0;
4874}
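/*
 * Sketch of the convenience loader; with BPF_PROG_TYPE_UNSPEC the type
 * is guessed from section names as above, and "prog.o" is illustrative:
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	err = bpf_prog_load("prog.o", BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd);
 */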
4875
4876struct bpf_link {
4877	int (*destroy)(struct bpf_link *link);
4878};
4879
4880int bpf_link__destroy(struct bpf_link *link)
4881{
4882	int err;
4883
4884	if (!link)
4885		return 0;
4886
4887	err = link->destroy(link);
4888	free(link);
4889
4890	return err;
4891}
4892
4893struct bpf_link_fd {
4894	struct bpf_link link; /* has to be at the top of struct */
4895	int fd; /* hook FD */
4896};
4897
4898static int bpf_link__destroy_perf_event(struct bpf_link *link)
4899{
4900	struct bpf_link_fd *l = (void *)link;
4901	int err;
4902
4903	err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
4904	if (err)
4905		err = -errno;
4906
4907	close(l->fd);
4908	return err;
4909}
4910
4911struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
4912						int pfd)
4913{
4914	char errmsg[STRERR_BUFSIZE];
4915	struct bpf_link_fd *link;
4916	int prog_fd, err;
4917
4918	if (pfd < 0) {
4919		pr_warning("program '%s': invalid perf event FD %d\n",
4920			   bpf_program__title(prog, false), pfd);
4921		return ERR_PTR(-EINVAL);
4922	}
4923	prog_fd = bpf_program__fd(prog);
4924	if (prog_fd < 0) {
4925		pr_warning("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
4926			   bpf_program__title(prog, false));
4927		return ERR_PTR(-EINVAL);
4928	}
4929
4930	link = malloc(sizeof(*link));
4931	if (!link)
4932		return ERR_PTR(-ENOMEM);
4933	link->link.destroy = &bpf_link__destroy_perf_event;
4934	link->fd = pfd;
4935
4936	if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
4937		err = -errno;
4938		free(link);
4939		pr_warning("program '%s': failed to attach to pfd %d: %s\n",
4940			   bpf_program__title(prog, false), pfd,
4941			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
4942		return ERR_PTR(err);
4943	}
4944	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
4945		err = -errno;
4946		free(link);
4947		pr_warning("program '%s': failed to enable pfd %d: %s\n",
4948			   bpf_program__title(prog, false), pfd,
4949			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
4950		return ERR_PTR(err);
4951	}
4952	return (struct bpf_link *)link;
4953}
4954
4955/*
4956 * This function is expected to parse an integer in the range of [0, 2^31-1]
4957 * from the given file using scanf format string fmt. If the actual parsed
4958 * value is negative, the result is indistinguishable from an error.
4959 */
4960static int parse_uint_from_file(const char *file, const char *fmt)
4961{
4962	char buf[STRERR_BUFSIZE];
4963	int err, ret;
4964	FILE *f;
4965
4966	f = fopen(file, "r");
4967	if (!f) {
4968		err = -errno;
4969		pr_debug("failed to open '%s': %s\n", file,
4970			 libbpf_strerror_r(err, buf, sizeof(buf)));
4971		return err;
4972	}
4973	err = fscanf(f, fmt, &ret);
4974	if (err != 1) {
4975		err = err == EOF ? -EIO : -errno;
4976		pr_debug("failed to parse '%s': %s\n", file,
4977			libbpf_strerror_r(err, buf, sizeof(buf)));
4978		fclose(f);
4979		return err;
4980	}
4981	fclose(f);
4982	return ret;
4983}
4984
4985static int determine_kprobe_perf_type(void)
4986{
4987	const char *file = "/sys/bus/event_source/devices/kprobe/type";
4988
4989	return parse_uint_from_file(file, "%d\n");
4990}
4991
4992static int determine_uprobe_perf_type(void)
4993{
4994	const char *file = "/sys/bus/event_source/devices/uprobe/type";
4995
4996	return parse_uint_from_file(file, "%d\n");
4997}
4998
4999static int determine_kprobe_retprobe_bit(void)
5000{
5001	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
5002
5003	return parse_uint_from_file(file, "config:%d\n");
5004}
5005
5006static int determine_uprobe_retprobe_bit(void)
5007{
5008	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
5009
5010	return parse_uint_from_file(file, "config:%d\n");
5011}
5012
5013static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
5014				 uint64_t offset, int pid)
5015{
5016	struct perf_event_attr attr = {};
5017	char errmsg[STRERR_BUFSIZE];
5018	int type, pfd, err;
5019
5020	type = uprobe ? determine_uprobe_perf_type()
5021		      : determine_kprobe_perf_type();
5022	if (type < 0) {
5023		pr_warning("failed to determine %s perf type: %s\n",
5024			   uprobe ? "uprobe" : "kprobe",
5025			   libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
5026		return type;
5027	}
5028	if (retprobe) {
5029		int bit = uprobe ? determine_uprobe_retprobe_bit()
5030				 : determine_kprobe_retprobe_bit();
5031
5032		if (bit < 0) {
5033			pr_warning("failed to determine %s retprobe bit: %s\n",
5034				   uprobe ? "uprobe" : "kprobe",
5035				   libbpf_strerror_r(bit, errmsg,
5036						     sizeof(errmsg)));
5037			return bit;
5038		}
5039		attr.config |= 1 << bit;
5040	}
5041	attr.size = sizeof(attr);
5042	attr.type = type;
5043	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
5044	attr.config2 = offset;		 /* kprobe_addr or probe_offset */
5045
5046	/* pid filter is meaningful only for uprobes */
5047	pfd = syscall(__NR_perf_event_open, &attr,
5048		      pid < 0 ? -1 : pid /* pid */,
5049		      pid == -1 ? 0 : -1 /* cpu */,
5050		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5051	if (pfd < 0) {
5052		err = -errno;
5053		pr_warning("%s perf_event_open() failed: %s\n",
5054			   uprobe ? "uprobe" : "kprobe",
5055			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5056		return err;
5057	}
5058	return pfd;
5059}
5060
5061struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
5062					    bool retprobe,
5063					    const char *func_name)
5064{
5065	char errmsg[STRERR_BUFSIZE];
5066	struct bpf_link *link;
5067	int pfd, err;
5068
5069	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
5070				    0 /* offset */, -1 /* pid */);
5071	if (pfd < 0) {
5072		pr_warning("program '%s': failed to create %s '%s' perf event: %s\n",
5073			   bpf_program__title(prog, false),
5074			   retprobe ? "kretprobe" : "kprobe", func_name,
5075			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5076		return ERR_PTR(pfd);
5077	}
5078	link = bpf_program__attach_perf_event(prog, pfd);
5079	if (IS_ERR(link)) {
5080		close(pfd);
5081		err = PTR_ERR(link);
5082		pr_warning("program '%s': failed to attach to %s '%s': %s\n",
5083			   bpf_program__title(prog, false),
5084			   retprobe ? "kretprobe" : "kprobe", func_name,
5085			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5086		return link;
5087	}
5088	return link;
5089}
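/*
 * Sketch: attaching at a kernel function entry; "do_sys_open" is only
 * an example symbol, and retprobe=false selects a kprobe rather than a
 * kretprobe:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *	if (libbpf_get_error(link))
 *		return -1;
 *	...
 *	bpf_link__destroy(link);
 */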
5090
5091struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
5092					    bool retprobe, pid_t pid,
5093					    const char *binary_path,
5094					    size_t func_offset)
5095{
5096	char errmsg[STRERR_BUFSIZE];
5097	struct bpf_link *link;
5098	int pfd, err;
5099
5100	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
5101				    binary_path, func_offset, pid);
5102	if (pfd < 0) {
5103		pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
5104			   bpf_program__title(prog, false),
5105			   retprobe ? "uretprobe" : "uprobe",
5106			   binary_path, func_offset,
5107			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5108		return ERR_PTR(pfd);
5109	}
5110	link = bpf_program__attach_perf_event(prog, pfd);
5111	if (IS_ERR(link)) {
5112		close(pfd);
5113		err = PTR_ERR(link);
5114		pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
5115			   bpf_program__title(prog, false),
5116			   retprobe ? "uretprobe" : "uprobe",
5117			   binary_path, func_offset,
5118			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5119		return link;
5120	}
5121	return link;
5122}
5123
5124static int determine_tracepoint_id(const char *tp_category,
5125				   const char *tp_name)
5126{
5127	char file[PATH_MAX];
5128	int ret;
5129
5130	ret = snprintf(file, sizeof(file),
5131		       "/sys/kernel/debug/tracing/events/%s/%s/id",
5132		       tp_category, tp_name);
5133	if (ret < 0)
5134		return -errno;
5135	if (ret >= sizeof(file)) {
5136		pr_debug("tracepoint %s/%s path is too long\n",
5137			 tp_category, tp_name);
5138		return -E2BIG;
5139	}
5140	return parse_uint_from_file(file, "%d\n");
5141}
5142
5143static int perf_event_open_tracepoint(const char *tp_category,
5144				      const char *tp_name)
5145{
5146	struct perf_event_attr attr = {};
5147	char errmsg[STRERR_BUFSIZE];
5148	int tp_id, pfd, err;
5149
5150	tp_id = determine_tracepoint_id(tp_category, tp_name);
5151	if (tp_id < 0) {
5152		pr_warning("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
5153			   tp_category, tp_name,
5154			   libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
5155		return tp_id;
5156	}
5157
5158	attr.type = PERF_TYPE_TRACEPOINT;
5159	attr.size = sizeof(attr);
5160	attr.config = tp_id;
5161
5162	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
5163		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5164	if (pfd < 0) {
5165		err = -errno;
5166		pr_warning("tracepoint '%s/%s' perf_event_open() failed: %s\n",
5167			   tp_category, tp_name,
5168			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5169		return err;
5170	}
5171	return pfd;
5172}
5173
5174struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
5175						const char *tp_category,
5176						const char *tp_name)
5177{
5178	char errmsg[STRERR_BUFSIZE];
5179	struct bpf_link *link;
5180	int pfd, err;
5181
5182	pfd = perf_event_open_tracepoint(tp_category, tp_name);
5183	if (pfd < 0) {
5184		pr_warning("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
5185			   bpf_program__title(prog, false),
5186			   tp_category, tp_name,
5187			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5188		return ERR_PTR(pfd);
5189	}
5190	link = bpf_program__attach_perf_event(prog, pfd);
5191	if (IS_ERR(link)) {
5192		close(pfd);
5193		err = PTR_ERR(link);
5194		pr_warning("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
5195			   bpf_program__title(prog, false),
5196			   tp_category, tp_name,
5197			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5198		return link;
5199	}
5200	return link;
5201}
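/*
 * Sketch: the category/name pair maps to
 * /sys/kernel/debug/tracing/events/<category>/<name>; the values below
 * are examples:
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_openat");
 */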
5202
5203static int bpf_link__destroy_fd(struct bpf_link *link)
5204{
5205	struct bpf_link_fd *l = (void *)link;
5206
5207	return close(l->fd);
5208}
5209
5210struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
5211						    const char *tp_name)
5212{
5213	char errmsg[STRERR_BUFSIZE];
5214	struct bpf_link_fd *link;
5215	int prog_fd, pfd;
5216
5217	prog_fd = bpf_program__fd(prog);
5218	if (prog_fd < 0) {
5219		pr_warning("program '%s': can't attach before loaded\n",
5220			   bpf_program__title(prog, false));
5221		return ERR_PTR(-EINVAL);
5222	}
5223
5224	link = malloc(sizeof(*link));
5225	if (!link)
5226		return ERR_PTR(-ENOMEM);
5227	link->link.destroy = &bpf_link__destroy_fd;
5228
5229	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
5230	if (pfd < 0) {
5231		pfd = -errno;
5232		free(link);
5233		pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n",
5234			   bpf_program__title(prog, false), tp_name,
5235			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5236		return ERR_PTR(pfd);
5237	}
5238	link->fd = pfd;
5239	return (struct bpf_link *)link;
5240}
5241
5242enum bpf_perf_event_ret
5243bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
5244			   void **copy_mem, size_t *copy_size,
5245			   bpf_perf_event_print_t fn, void *private_data)
5246{
5247	struct perf_event_mmap_page *header = mmap_mem;
5248	__u64 data_head = ring_buffer_read_head(header);
5249	__u64 data_tail = header->data_tail;
5250	void *base = ((__u8 *)header) + page_size;
5251	int ret = LIBBPF_PERF_EVENT_CONT;
5252	struct perf_event_header *ehdr;
5253	size_t ehdr_size;
5254
5255	while (data_head != data_tail) {
5256		ehdr = base + (data_tail & (mmap_size - 1));
5257		ehdr_size = ehdr->size;
5258
5259		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
5260			void *copy_start = ehdr;
5261			size_t len_first = base + mmap_size - copy_start;
5262			size_t len_secnd = ehdr_size - len_first;
5263
5264			if (*copy_size < ehdr_size) {
5265				free(*copy_mem);
5266				*copy_mem = malloc(ehdr_size);
5267				if (!*copy_mem) {
5268					*copy_size = 0;
5269					ret = LIBBPF_PERF_EVENT_ERROR;
5270					break;
5271				}
5272				*copy_size = ehdr_size;
5273			}
5274
5275			memcpy(*copy_mem, copy_start, len_first);
5276			memcpy(*copy_mem + len_first, base, len_secnd);
5277			ehdr = *copy_mem;
5278		}
5279
5280		ret = fn(ehdr, private_data);
5281		data_tail += ehdr_size;
5282		if (ret != LIBBPF_PERF_EVENT_CONT)
5283			break;
5284	}
5285
5286	ring_buffer_write_tail(header, data_tail);
5287	return ret;
5288}
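/*
 * Sketch of a callback for this low-level reader;
 * perf_buffer__process_record() below is the in-tree consumer:
 *
 *	static enum bpf_perf_event_ret
 *	handle_event(struct perf_event_header *ehdr, void *ctx)
 *	{
 *		...
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 */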
5289
5290struct perf_buffer;
5291
5292struct perf_buffer_params {
5293	struct perf_event_attr *attr;
5294	/* if event_cb is specified, it takes precedence */
5295	perf_buffer_event_fn event_cb;
5296	/* sample_cb and lost_cb are higher-level common-case callbacks */
5297	perf_buffer_sample_fn sample_cb;
5298	perf_buffer_lost_fn lost_cb;
5299	void *ctx;
5300	int cpu_cnt;
5301	int *cpus;
5302	int *map_keys;
5303};
5304
5305struct perf_cpu_buf {
5306	struct perf_buffer *pb;
5307	void *base; /* mmap()'ed memory */
5308	void *buf; /* for reconstructing segmented data */
5309	size_t buf_size;
5310	int fd;
5311	int cpu;
5312	int map_key;
5313};
5314
5315struct perf_buffer {
5316	perf_buffer_event_fn event_cb;
5317	perf_buffer_sample_fn sample_cb;
5318	perf_buffer_lost_fn lost_cb;
5319	void *ctx; /* passed into callbacks */
5320
5321	size_t page_size;
5322	size_t mmap_size;
5323	struct perf_cpu_buf **cpu_bufs;
5324	struct epoll_event *events;
5325	int cpu_cnt;
5326	int epoll_fd; /* epoll instance FD */
5327	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
5328};
5329
5330static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
5331				      struct perf_cpu_buf *cpu_buf)
5332{
5333	if (!cpu_buf)
5334		return;
5335	if (cpu_buf->base &&
5336	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
5337		pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
5338	if (cpu_buf->fd >= 0) {
5339		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
5340		close(cpu_buf->fd);
5341	}
5342	free(cpu_buf->buf);
5343	free(cpu_buf);
5344}
5345
5346void perf_buffer__free(struct perf_buffer *pb)
5347{
5348	int i;
5349
5350	if (!pb)
5351		return;
5352	if (pb->cpu_bufs) {
5353		for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
5354			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
5355
5356			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
5357			perf_buffer__free_cpu_buf(pb, cpu_buf);
5358		}
5359		free(pb->cpu_bufs);
5360	}
5361	if (pb->epoll_fd >= 0)
5362		close(pb->epoll_fd);
5363	free(pb->events);
5364	free(pb);
5365}
5366
5367static struct perf_cpu_buf *
5368perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
5369			  int cpu, int map_key)
5370{
5371	struct perf_cpu_buf *cpu_buf;
5372	char msg[STRERR_BUFSIZE];
5373	int err;
5374
5375	cpu_buf = calloc(1, sizeof(*cpu_buf));
5376	if (!cpu_buf)
5377		return ERR_PTR(-ENOMEM);
5378
5379	cpu_buf->pb = pb;
5380	cpu_buf->cpu = cpu;
5381	cpu_buf->map_key = map_key;
5382
5383	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
5384			      -1, PERF_FLAG_FD_CLOEXEC);
5385	if (cpu_buf->fd < 0) {
5386		err = -errno;
5387		pr_warning("failed to open perf buffer event on cpu #%d: %s\n",
5388			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5389		goto error;
5390	}
5391
5392	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
5393			     PROT_READ | PROT_WRITE, MAP_SHARED,
5394			     cpu_buf->fd, 0);
5395	if (cpu_buf->base == MAP_FAILED) {
5396		cpu_buf->base = NULL;
5397		err = -errno;
5398		pr_warning("failed to mmap perf buffer on cpu #%d: %s\n",
5399			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5400		goto error;
5401	}
5402
5403	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
5404		err = -errno;
5405		pr_warning("failed to enable perf buffer event on cpu #%d: %s\n",
5406			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5407		goto error;
5408	}
5409
5410	return cpu_buf;
5411
5412error:
5413	perf_buffer__free_cpu_buf(pb, cpu_buf);
5414	return (struct perf_cpu_buf *)ERR_PTR(err);
5415}
5416
5417static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
5418					      struct perf_buffer_params *p);
5419
5420struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
5421				     const struct perf_buffer_opts *opts)
5422{
5423	struct perf_buffer_params p = {};
5424	struct perf_event_attr attr = { 0, };
5425
5426	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
5427	attr.type = PERF_TYPE_SOFTWARE;
5428	attr.sample_type = PERF_SAMPLE_RAW;
5429	attr.sample_period = 1;
5430	attr.wakeup_events = 1;
5431
5432	p.attr = &attr;
5433	p.sample_cb = opts ? opts->sample_cb : NULL;
5434	p.lost_cb = opts ? opts->lost_cb : NULL;
5435	p.ctx = opts ? opts->ctx : NULL;
5436
5437	return __perf_buffer__new(map_fd, page_cnt, &p);
5438}
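/*
 * Usage sketch for the common case; handle_sample() and the ring size
 * of 8 pages per CPU are illustrative, and map_fd must refer to a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map:
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		...
 *	}
 *
 *	struct perf_buffer_opts opts = { .sample_cb = handle_sample };
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(map_fd, 8, &opts);
 *	if (libbpf_get_error(pb))
 *		return -1;
 */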
5439
5440struct perf_buffer *
5441perf_buffer__new_raw(int map_fd, size_t page_cnt,
5442		     const struct perf_buffer_raw_opts *opts)
5443{
5444	struct perf_buffer_params p = {};
5445
5446	p.attr = opts->attr;
5447	p.event_cb = opts->event_cb;
5448	p.ctx = opts->ctx;
5449	p.cpu_cnt = opts->cpu_cnt;
5450	p.cpus = opts->cpus;
5451	p.map_keys = opts->map_keys;
5452
5453	return __perf_buffer__new(map_fd, page_cnt, &p);
5454}
5455
5456static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
5457					      struct perf_buffer_params *p)
5458{
5459	struct bpf_map_info map = {};
5460	char msg[STRERR_BUFSIZE];
5461	struct perf_buffer *pb;
5462	__u32 map_info_len;
5463	int err, i;
5464
5465	if (page_cnt & (page_cnt - 1)) {
5466		pr_warning("page count should be a power of two, but is %zu\n",
5467			   page_cnt);
5468		return ERR_PTR(-EINVAL);
5469	}
5470
5471	map_info_len = sizeof(map);
5472	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
5473	if (err) {
5474		err = -errno;
5475		pr_warning("failed to get map info for map FD %d: %s\n",
5476			   map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
5477		return ERR_PTR(err);
5478	}
5479
5480	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
5481		pr_warning("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
5482			   map.name);
5483		return ERR_PTR(-EINVAL);
5484	}
5485
5486	pb = calloc(1, sizeof(*pb));
5487	if (!pb)
5488		return ERR_PTR(-ENOMEM);
5489
5490	pb->event_cb = p->event_cb;
5491	pb->sample_cb = p->sample_cb;
5492	pb->lost_cb = p->lost_cb;
5493	pb->ctx = p->ctx;
5494
5495	pb->page_size = getpagesize();
5496	pb->mmap_size = pb->page_size * page_cnt;
5497	pb->map_fd = map_fd;
5498
5499	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
5500	if (pb->epoll_fd < 0) {
5501		err = -errno;
5502		pr_warning("failed to create epoll instance: %s\n",
5503			   libbpf_strerror_r(err, msg, sizeof(msg)));
5504		goto error;
5505	}
5506
5507	if (p->cpu_cnt > 0) {
5508		pb->cpu_cnt = p->cpu_cnt;
5509	} else {
5510		pb->cpu_cnt = libbpf_num_possible_cpus();
5511		if (pb->cpu_cnt < 0) {
5512			err = pb->cpu_cnt;
5513			goto error;
5514		}
5515		if (map.max_entries < pb->cpu_cnt)
5516			pb->cpu_cnt = map.max_entries;
5517	}
5518
5519	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
5520	if (!pb->events) {
5521		err = -ENOMEM;
5522		pr_warning("failed to allocate events: out of memory\n");
5523		goto error;
5524	}
5525	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
5526	if (!pb->cpu_bufs) {
5527		err = -ENOMEM;
5528		pr_warning("failed to allocate buffers: out of memory\n");
5529		goto error;
5530	}
5531
5532	for (i = 0; i < pb->cpu_cnt; i++) {
5533		struct perf_cpu_buf *cpu_buf;
5534		int cpu, map_key;
5535
5536		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
5537		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
5538
5539		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
5540		if (IS_ERR(cpu_buf)) {
5541			err = PTR_ERR(cpu_buf);
5542			goto error;
5543		}
5544
5545		pb->cpu_bufs[i] = cpu_buf;
5546
5547		err = bpf_map_update_elem(pb->map_fd, &map_key,
5548					  &cpu_buf->fd, 0);
5549		if (err) {
5550			err = -errno;
5551			pr_warning("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
5552				   cpu, map_key, cpu_buf->fd,
5553				   libbpf_strerror_r(err, msg, sizeof(msg)));
5554			goto error;
5555		}
5556
5557		pb->events[i].events = EPOLLIN;
5558		pb->events[i].data.ptr = cpu_buf;
5559		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
5560			      &pb->events[i]) < 0) {
5561			err = -errno;
5562			pr_warning("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
5563				   cpu, cpu_buf->fd,
5564				   libbpf_strerror_r(err, msg, sizeof(msg)));
5565			goto error;
5566		}
5567	}
5568
5569	return pb;
5570
5571error:
5572	if (pb)
5573		perf_buffer__free(pb);
5574	return ERR_PTR(err);
5575}
5576
5577struct perf_sample_raw {
5578	struct perf_event_header header;
5579	uint32_t size;
5580	char data[0];
5581};
5582
5583struct perf_sample_lost {
5584	struct perf_event_header header;
5585	uint64_t id;
5586	uint64_t lost;
5587	uint64_t sample_id;
5588};
5589
5590static enum bpf_perf_event_ret
5591perf_buffer__process_record(struct perf_event_header *e, void *ctx)
5592{
5593	struct perf_cpu_buf *cpu_buf = ctx;
5594	struct perf_buffer *pb = cpu_buf->pb;
5595	void *data = e;
5596
5597	/* user wants full control over parsing perf event */
5598	if (pb->event_cb)
5599		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
5600
5601	switch (e->type) {
5602	case PERF_RECORD_SAMPLE: {
5603		struct perf_sample_raw *s = data;
5604
5605		if (pb->sample_cb)
5606			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
5607		break;
5608	}
5609	case PERF_RECORD_LOST: {
5610		struct perf_sample_lost *s = data;
5611
5612		if (pb->lost_cb)
5613			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
5614		break;
5615	}
5616	default:
5617		pr_warning("unknown perf sample type %d\n", e->type);
5618		return LIBBPF_PERF_EVENT_ERROR;
5619	}
5620	return LIBBPF_PERF_EVENT_CONT;
5621}
5622
5623static int perf_buffer__process_records(struct perf_buffer *pb,
5624					struct perf_cpu_buf *cpu_buf)
5625{
5626	enum bpf_perf_event_ret ret;
5627
5628	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
5629					 pb->page_size, &cpu_buf->buf,
5630					 &cpu_buf->buf_size,
5631					 perf_buffer__process_record, cpu_buf);
5632	if (ret != LIBBPF_PERF_EVENT_CONT)
5633		return ret;
5634	return 0;
5635}
5636
5637int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
5638{
5639	int i, cnt, err;
5640
5641	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
5642	for (i = 0; i < cnt; i++) {
5643		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
5644
5645		err = perf_buffer__process_records(pb, cpu_buf);
5646		if (err) {
5647			pr_warning("error while processing records: %d\n", err);
5648			return err;
5649		}
5650	}
5651	return cnt < 0 ? -errno : cnt;
5652}
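/*
 * Sketch of a consume loop; the 100ms timeout and the stop flag are
 * illustrative:
 *
 *	while (!stop) {
 *		err = perf_buffer__poll(pb, 100);
 *		if (err < 0 && err != -EINTR)
 *			break;
 *	}
 *	perf_buffer__free(pb);
 */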
5653
5654struct bpf_prog_info_array_desc {
5655	int	array_offset;	/* e.g. offset of jited_prog_insns */
5656	int	count_offset;	/* e.g. offset of jited_prog_len */
5657	int	size_offset;	/* > 0: offset of rec size,
5658				 * < 0: fixed size of -size_offset
5659				 */
5660};
5661
5662static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
5663	[BPF_PROG_INFO_JITED_INSNS] = {
5664		offsetof(struct bpf_prog_info, jited_prog_insns),
5665		offsetof(struct bpf_prog_info, jited_prog_len),
5666		-1,
5667	},
5668	[BPF_PROG_INFO_XLATED_INSNS] = {
5669		offsetof(struct bpf_prog_info, xlated_prog_insns),
5670		offsetof(struct bpf_prog_info, xlated_prog_len),
5671		-1,
5672	},
5673	[BPF_PROG_INFO_MAP_IDS] = {
5674		offsetof(struct bpf_prog_info, map_ids),
5675		offsetof(struct bpf_prog_info, nr_map_ids),
5676		-(int)sizeof(__u32),
5677	},
5678	[BPF_PROG_INFO_JITED_KSYMS] = {
5679		offsetof(struct bpf_prog_info, jited_ksyms),
5680		offsetof(struct bpf_prog_info, nr_jited_ksyms),
5681		-(int)sizeof(__u64),
5682	},
5683	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
5684		offsetof(struct bpf_prog_info, jited_func_lens),
5685		offsetof(struct bpf_prog_info, nr_jited_func_lens),
5686		-(int)sizeof(__u32),
5687	},
5688	[BPF_PROG_INFO_FUNC_INFO] = {
5689		offsetof(struct bpf_prog_info, func_info),
5690		offsetof(struct bpf_prog_info, nr_func_info),
5691		offsetof(struct bpf_prog_info, func_info_rec_size),
5692	},
5693	[BPF_PROG_INFO_LINE_INFO] = {
5694		offsetof(struct bpf_prog_info, line_info),
5695		offsetof(struct bpf_prog_info, nr_line_info),
5696		offsetof(struct bpf_prog_info, line_info_rec_size),
5697	},
5698	[BPF_PROG_INFO_JITED_LINE_INFO] = {
5699		offsetof(struct bpf_prog_info, jited_line_info),
5700		offsetof(struct bpf_prog_info, nr_jited_line_info),
5701		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
5702	},
5703	[BPF_PROG_INFO_PROG_TAGS] = {
5704		offsetof(struct bpf_prog_info, prog_tags),
5705		offsetof(struct bpf_prog_info, nr_prog_tags),
5706		-(int)sizeof(__u8) * BPF_TAG_SIZE,
5707	},
5708
5709};
5710
5711static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
5712{
5713	__u32 *array = (__u32 *)info;
5714
5715	if (offset >= 0)
5716		return array[offset / sizeof(__u32)];
5717	return -(int)offset;
5718}
5719
5720static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
5721{
5722	__u64 *array = (__u64 *)info;
5723
5724	if (offset >= 0)
5725		return array[offset / sizeof(__u64)];
5726	return -(int)offset;
5727}
5728
5729static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
5730					 __u32 val)
5731{
5732	__u32 *array = (__u32 *)info;
5733
5734	if (offset >= 0)
5735		array[offset / sizeof(__u32)] = val;
5736}
5737
5738static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
5739					 __u64 val)
5740{
5741	__u64 *array = (__u64 *)info;
5742
5743	if (offset >= 0)
5744		array[offset / sizeof(__u64)] = val;
5745}
5746
5747struct bpf_prog_info_linear *
5748bpf_program__get_prog_info_linear(int fd, __u64 arrays)
5749{
5750	struct bpf_prog_info_linear *info_linear;
5751	struct bpf_prog_info info = {};
5752	__u32 info_len = sizeof(info);
5753	__u32 data_len = 0;
5754	int i, err;
5755	void *ptr;
5756
5757	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
5758		return ERR_PTR(-EINVAL);
5759
5760	/* step 1: get array dimensions */
5761	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
5762	if (err) {
5763		pr_debug("can't get prog info: %s\n", strerror(errno));
5764		return ERR_PTR(-EFAULT);
5765	}
5766
5767	/* step 2: calculate total size of all arrays */
5768	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5769		bool include_array = (arrays & (1UL << i)) > 0;
5770		struct bpf_prog_info_array_desc *desc;
5771		__u32 count, size;
5772
5773		desc = bpf_prog_info_array_desc + i;
5774
5775		/* kernel is too old to support this field */
5776		if (info_len < desc->array_offset + sizeof(__u32) ||
5777		    info_len < desc->count_offset + sizeof(__u32) ||
5778		    (desc->size_offset > 0 && info_len < desc->size_offset))
5779			include_array = false;
5780
5781		if (!include_array) {
5782			arrays &= ~(1UL << i);	/* clear the bit */
5783			continue;
5784		}
5785
5786		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5787		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5788
5789		data_len += count * size;
5790	}
5791
5792	/* step 3: allocate continuous memory */
5793	data_len = roundup(data_len, sizeof(__u64));
5794	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
5795	if (!info_linear)
5796		return ERR_PTR(-ENOMEM);
5797
5798	/* step 4: fill data to info_linear->info */
5799	info_linear->arrays = arrays;
5800	memset(&info_linear->info, 0, sizeof(info));
5801	ptr = info_linear->data;
5802
5803	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5804		struct bpf_prog_info_array_desc *desc;
5805		__u32 count, size;
5806
5807		if ((arrays & (1UL << i)) == 0)
5808			continue;
5809
5810		desc  = bpf_prog_info_array_desc + i;
5811		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5812		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5813		bpf_prog_info_set_offset_u32(&info_linear->info,
5814					     desc->count_offset, count);
5815		bpf_prog_info_set_offset_u32(&info_linear->info,
5816					     desc->size_offset, size);
5817		bpf_prog_info_set_offset_u64(&info_linear->info,
5818					     desc->array_offset,
5819					     ptr_to_u64(ptr));
5820		ptr += count * size;
5821	}
5822
5823	/* step 5: call syscall again to get required arrays */
5824	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
5825	if (err) {
5826		pr_debug("can't get prog info: %s\n", strerror(errno));
5827		free(info_linear);
5828		return ERR_PTR(-EFAULT);
5829	}
5830
5831	/* step 6: verify the data */
5832	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5833		struct bpf_prog_info_array_desc *desc;
5834		__u32 v1, v2;
5835
5836		if ((arrays & (1UL << i)) == 0)
5837			continue;
5838
5839		desc = bpf_prog_info_array_desc + i;
5840		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5841		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
5842						   desc->count_offset);
5843		if (v1 != v2)
5844			pr_warning("%s: mismatch in element count\n", __func__);
5845
5846		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5847		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
5848						   desc->size_offset);
5849		if (v1 != v2)
5850			pr_warning("%s: mismatch in rec size\n", __func__);
5851	}
5852
5853	/* step 7: update info_len and data_len */
5854	info_linear->info_len = sizeof(struct bpf_prog_info);
5855	info_linear->data_len = data_len;
5856
5857	return info_linear;
5858}
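/*
 * Sketch: fetching translated instructions and map IDs in one linear
 * blob; the arrays mask is built from BPF_PROG_INFO_* bits:
 *
 *	__u64 arrays = (1UL << BPF_PROG_INFO_XLATED_INSNS) |
 *		       (1UL << BPF_PROG_INFO_MAP_IDS);
 *	struct bpf_prog_info_linear *info;
 *
 *	info = bpf_program__get_prog_info_linear(prog_fd, arrays);
 *	if (libbpf_get_error(info))
 *		return -1;
 *	...
 *	free(info);
 */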
5859
5860void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
5861{
5862	int i;
5863
5864	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5865		struct bpf_prog_info_array_desc *desc;
5866		__u64 addr, offs;
5867
5868		if ((info_linear->arrays & (1UL << i)) == 0)
5869			continue;
5870
5871		desc = bpf_prog_info_array_desc + i;
5872		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
5873						     desc->array_offset);
5874		offs = addr - ptr_to_u64(info_linear->data);
5875		bpf_prog_info_set_offset_u64(&info_linear->info,
5876					     desc->array_offset, offs);
5877	}
5878}
5879
5880void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
5881{
5882	int i;
5883
5884	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5885		struct bpf_prog_info_array_desc *desc;
5886		__u64 addr, offs;
5887
5888		if ((info_linear->arrays & (1UL << i)) == 0)
5889			continue;
5890
5891		desc = bpf_prog_info_array_desc + i;
5892		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
5893						     desc->array_offset);
5894		addr = offs + ptr_to_u64(info_linear->data);
5895		bpf_prog_info_set_offset_u64(&info_linear->info,
5896					     desc->array_offset, addr);
5897	}
5898}
5899
5900int libbpf_num_possible_cpus(void)
5901{
5902	static const char *fcpu = "/sys/devices/system/cpu/possible";
5903	int len = 0, n = 0, il = 0, ir = 0;
5904	unsigned int start = 0, end = 0;
5905	int tmp_cpus = 0;
5906	static int cpus;
5907	char buf[128];
5908	int error = 0;
5909	int fd = -1;
5910
5911	tmp_cpus = READ_ONCE(cpus);
5912	if (tmp_cpus > 0)
5913		return tmp_cpus;
5914
5915	fd = open(fcpu, O_RDONLY);
5916	if (fd < 0) {
5917		error = errno;
5918		pr_warning("Failed to open file %s: %s\n",
5919			   fcpu, strerror(error));
5920		return -error;
5921	}
5922	len = read(fd, buf, sizeof(buf));
5923	close(fd);
5924	if (len <= 0) {
5925		error = len ? errno : EINVAL;
5926		pr_warning("Failed to read # of possible cpus from %s: %s\n",
5927			   fcpu, strerror(error));
5928		return -error;
5929	}
5930	if (len == sizeof(buf)) {
5931		pr_warning("File %s size overflow\n", fcpu);
5932		return -EOVERFLOW;
5933	}
5934	buf[len] = '\0';
5935
5936	for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
5937		/* Each substring separated by ',' has the format \d+-\d+ or \d+ */
5938		if (buf[ir] == ',' || buf[ir] == '\0') {
5939			buf[ir] = '\0';
5940			n = sscanf(&buf[il], "%u-%u", &start, &end);
5941			if (n <= 0) {
5942				pr_warning("Failed to get # CPUs from %s\n",
5943					   &buf[il]);
5944				return -EINVAL;
5945			} else if (n == 1) {
5946				end = start;
5947			}
5948			tmp_cpus += end - start + 1;
5949			il = ir + 1;
5950		}
5951	}
5952	if (tmp_cpus <= 0) {
5953		pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
5954		return -EINVAL;
5955	}
5956
5957	WRITE_ONCE(cpus, tmp_cpus);
5958	return tmp_cpus;
5959}
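/*
 * Sketch: sizing a per-CPU value buffer with the cached CPU count:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	values = calloc(ncpus, sizeof(__u64));
 */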