// SPDX-License-Identifier: LGPL-2.1

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not,  see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

#define __printf(a, b)	__attribute__((format(printf, a, b)))

__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}

static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)

void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
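
/*
 * Usage sketch (not part of the library): callers can reroute or silence
 * libbpf logging before making any other call. The callback names here
 * are hypothetical; anything matching libbpf_print_fn_t works, and a
 * NULL level is silenced because __pr() tests the pointer first:
 *
 *	libbpf_set_print(my_warn_fn, my_info_fn, NULL);
 */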

#define STRERR_BUFSIZE  128

#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)

static const char *libbpf_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
	[ERRCODE_OFFSET(WRNGPID)]	= "Wrong pid in netlink message",
	[ERRCODE_OFFSET(INVSEQ)]	= "Invalid netlink sequence",
};

int libbpf_strerror(int err, char *buf, size_t size)
{
	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err < __LIBBPF_ERRNO__START) {
		int ret;

		ret = strerror_r(err, buf, size);
		buf[size - 1] = '\0';
		return ret;
	}

	if (err < __LIBBPF_ERRNO__END) {
		const char *msg;

		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	snprintf(buf, size, "Unknown libbpf error %d", err);
	buf[size - 1] = '\0';
	return -1;
}
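
/*
 * Usage sketch (illustrative only): libbpf_strerror() accepts both plain
 * errno values and libbpf's private error range, so a single call covers
 * either case:
 *
 *	char errbuf[STRERR_BUFSIZE];
 *
 *	libbpf_strerror(err, errbuf, sizeof(errbuf));
 *	fprintf(stderr, "libbpf: %s\n", errbuf);
 */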

#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while (0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	char *section_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};

struct bpf_map {
	int fd;
	char *name;
	size_t offset;
	struct bpf_map_def def;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];
	u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
		int text_shndx;
	} efile;
	/*
	 * Every loaded bpf_object is linked into a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal
	 * with all objects.
	 */
	struct list_head list;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

static void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", section_name);
		return -EINVAL;
	}

	bzero(prog, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_KPROBE;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so it doesn't need special
		 * treatment in bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];
		if (prog->idx == obj->efile.text_shndx) {
			name = ".text";
			goto skip_search;
		}

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}
skip_search:
		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we should duplicate the buffer
	 * to avoid the user freeing it before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
					strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
				LIBBPF_ELF_C_READ_MMAP,
				NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
				obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
				obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int
bpf_object__check_endianness(struct bpf_object *obj)
{
	static unsigned int const endian = 1;

	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		/* We are big endian, BPF obj is little endian. */
		if (*(unsigned char const *)&endian != 1)
			goto mismatch;
		break;

	case ELFDATA2MSB:
		/* We are little endian, BPF obj is big endian. */
		if (*(unsigned char const *)&endian != 0)
			goto mismatch;
		break;
	default:
		return -LIBBPF_ERRNO__ENDIAN;
	}

	return 0;

mismatch:
	pr_warning("Error: endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}
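
/*
 * The function above derives host endianness by inspecting the first
 * byte of the constant 1: it reads as 1 on a little-endian host and 0
 * on a big-endian one. A minimal standalone sketch of the same trick:
 *
 *	static unsigned int const one = 1;
 *	int host_is_le = (*(unsigned char const *)&one == 1);
 */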

static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}
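
/*
 * For reference, the 'version' section read above is conventionally
 * emitted by the BPF program itself as a single u32; a typical
 * (illustrative) definition on the BPF C side is:
 *
 *	u32 _version SEC("version") = LINUX_VERSION_CODE;
 *
 * where SEC() places the variable into the named ELF section.
 */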

static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	return a->offset - b->offset;
}

static int
bpf_object__init_maps(struct bpf_object *obj)
{
	int i, map_idx, map_def_sz, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count the number of maps. Each map has a name.
	 * Arrays of maps are not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect arrays of maps and report an error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	if (!nr_maps)
		return 0;

	/* Assume equally sized map definitions */
	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	/*
	 * Fill all fds with -1 so we won't close an incorrect fd
	 * (fd=0 is stdin) on failure (zclose won't close a
	 * negative fd).
	 */
	for (i = 0; i < nr_maps; i++)
		obj->maps[i].fd = -1;

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it.  Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					return -EINVAL;
				}
			}
			memcpy(&obj->maps[map_idx].def, def,
			       sizeof(struct bpf_map_def));
		}
		map_idx++;
	}

	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return 0;
}
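
/*
 * For reference, the "maps" section parsed above is conventionally
 * populated on the BPF C side with one struct bpf_map_def per map, e.g.
 * (illustrative definition):
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type		= BPF_MAP_TYPE_HASH,
 *		.key_size	= sizeof(u32),
 *		.value_size	= sizeof(u64),
 *		.max_entries	= 1024,
 *	};
 *
 * Each map symbol's st_value is its byte offset into the section, which
 * is what the offset-based matching above and in relocation relies on.
 */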

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}

static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			   obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0)
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
		else if (strcmp(name, "version") == 0)
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
		else if (strcmp(name, "maps") == 0)
			obj->efile.maps_shndx = idx;
		else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else {
				obj->efile.symbols = data;
				obj->efile.strtabidx = sh.sh_link;
			}
		} else if ((sh.sh_type == SHT_PROGBITS) &&
			   (sh.sh_flags & SHF_EXECINSTR) &&
			   (data->d_size > 0)) {
			if (strcmp(name, ".text") == 0)
				obj->efile.text_shndx = idx;
			err = bpf_object__add_program(obj, data->d_buf,
						      data->d_size, name, idx);
			if (err) {
				char errmsg[STRERR_BUFSIZE];

				strerror_r(-err, errmsg, sizeof(errmsg));
				pr_warning("failed to alloc program %s (%s): %s",
					   name, obj->path, errmsg);
			}
		} else if (sh.sh_type == SHT_REL) {
			void *reloc = obj->efile.reloc;
			int nr_reloc = obj->efile.nr_reloc + 1;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			reloc = realloc(reloc,
					sizeof(*obj->efile.reloc) * nr_reloc);
			if (!reloc) {
				pr_warning("realloc failed\n");
				err = -ENOMEM;
			} else {
				int n = nr_reloc - 1;

				obj->efile.reloc = reloc;
				obj->efile.nr_reloc = nr_reloc;

				obj->efile.reloc[n].shdr = sh;
				obj->efile.reloc[n].data = data;
			}
		} else {
			pr_debug("skip section(%d) %s\n", idx, name);
		}
		if (err)
			goto out;
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	if (obj->efile.maps_shndx >= 0) {
		err = bpf_object__init_maps(obj);
		if (err)
			goto out;
	}
	err = bpf_object__init_prog_names(obj);
out:
	return err;
}
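
/*
 * Sketch of the object layout the collection pass above expects (section
 * names as libbpf recognizes them; anything else is skipped):
 *
 *	license      string such as "GPL"
 *	version      u32 kernel version code
 *	maps         array of struct bpf_map_def (optional)
 *	<prog sec>   executable PROGBITS holding BPF instructions
 *	.rel<sec>    SHT_REL entries applied to the matching program
 */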

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}

static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			continue;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d larger than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}

static int
bpf_object__create_maps(struct bpf_object *obj)
{
	unsigned int i;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map_def *def = &obj->maps[i].def;
		int *pfd = &obj->maps[i].fd;

		*pfd = bpf_create_map_name(def->type,
					   obj->maps[i].name,
					   def->key_size,
					   def->value_size,
					   def->max_entries,
					   def->map_flags);
		if (*pfd < 0) {
			size_t j;
			int err = *pfd;

			pr_warning("failed to create map (name: '%s'): %s\n",
				   obj->maps[i].name,
				   strerror(errno));
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map %s: fd=%d\n", obj->maps[i].name, *pfd);
	}

	return 0;
}

static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
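
/*
 * Worked example of the imm fixup above, with hypothetical numbers: for
 * a program of 10 instructions (main_prog_cnt = 10) whose pseudo call
 * sits at insn_idx = 4, the fixup adds 10 - 4 = 6 to imm. Since .text
 * was appended at index 10, this rebases the compiler's .text-relative
 * target into the call-relative form the kernel expects (it jumps to
 * insn_idx + imm + 1).
 */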

static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog || !prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}
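
/*
 * Concretely, the RELO_LD64 branch above rewrites the two-slot ld_imm64
 * the compiler emitted for a map symbol into the form the kernel
 * expects (illustrative before/after):
 *
 *	before: code = BPF_LD | BPF_IMM | BPF_DW, src_reg = 0, imm = 0
 *	after:  src_reg = BPF_PSEUDO_MAP_FD, imm = <map fd>
 *
 * The verifier later resolves the fd into the in-kernel map pointer.
 */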

static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}

static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}

static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}

static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
					i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
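
/*
 * Usage sketch for the preprocessor path above, assuming the caller
 * installed one with bpf_program__set_prep(); the callback name is
 * hypothetical:
 *
 *	static int my_prep(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;	(or a patched copy)
 *		res->new_insn_cnt = insns_cnt;
 *		res->pfd = NULL;
 *		return 0;
 *	}
 *
 * Leaving new_insn_ptr/new_insn_cnt unset makes the loop above skip
 * that instance, as the pr_debug branch shows.
 */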

static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (obj->programs[i].idx == obj->efile.text_shndx)
			continue;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__validate(struct bpf_object *obj)
{
	if (obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			   obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}

static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

struct bpf_object *bpf_object__open(const char *path)
{
	/* param validation */
	if (!path)
		return NULL;

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0);
}

struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz);
}

int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
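
/*
 * End-to-end usage sketch (error handling trimmed; "prog.o" is an
 * illustrative path):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *
 *	if (IS_ERR(obj) || !obj)
 *		return;
 *	if (!bpf_object__load(obj)) {
 *		... use bpf_program__fd()/bpf_map__fd() on its parts ...
 *	}
 *	bpf_object__close(obj);
 */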

static int check_path(const char *path)
{
	struct statfs st_fs;
	char *dname, *dir;
	int err = 0;

	if (path == NULL)
		return -EINVAL;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (statfs(dir, &st_fs)) {
		pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
		err = -errno;
	}
	free(dname);

	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
		pr_warning("specified path %s is not on BPF FS\n", path);
		err = -EINVAL;
	}

	return err;
}

int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
			      int instance)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
			   instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
		pr_warning("failed to pin program: %s\n", strerror(errno));
		return -errno;
	}
	pr_debug("pinned program '%s'\n", path);

	return 0;
}

static int make_dir(const char *path)
{
	int err = 0;

	if (mkdir(path, 0700) && errno != EEXIST)
		err = -errno;

	if (err)
		pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
	return err;
}

int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			return err;
	}

	return 0;
}
1556
1557int bpf_map__pin(struct bpf_map *map, const char *path)
1558{
 
1559	int err;
1560
1561	err = check_path(path);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1562	if (err)
1563		return err;
1564
1565	if (map == NULL) {
1566		pr_warning("invalid map pointer\n");
1567		return -EINVAL;
1568	}
1569
1570	if (bpf_obj_pin(map->fd, path)) {
1571		pr_warning("failed to pin map: %s\n", strerror(errno));
1572		return -errno;
1573	}
1574
1575	pr_debug("pinned map '%s'\n", path);
1576	return 0;
1577}
1578
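/* Pin all maps and programs of a loaded object under 'path': maps at
 * "<path>/<map name>", programs under "<path>/<section name>".
 *
 * Minimal usage sketch (editor's illustration; the object file name and
 * pin path are hypothetical):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *
 *	if (!libbpf_get_error(obj) && !bpf_object__load(obj))
 *		bpf_object__pin(obj, "/sys/fs/bpf/myobj");
 */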
1579int bpf_object__pin(struct bpf_object *obj, const char *path)
1580{
1581	struct bpf_program *prog;
1582	struct bpf_map *map;
1583	int err;
1584
1585	if (!obj)
1586		return -ENOENT;
1587
1588	if (!obj->loaded) {
1589		pr_warning("object not yet loaded; load it first\n");
1590		return -ENOENT;
1591	}
1592
1593	err = make_dir(path);
1594	if (err)
1595		return err;
1596
1597	bpf_map__for_each(map, obj) {
1598		char buf[PATH_MAX];
1599		int len;
1600
1601		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1602			       bpf_map__name(map));
1603		if (len < 0)
1604			return -EINVAL;
1605		else if (len >= PATH_MAX)
1606			return -ENAMETOOLONG;
1607
1608		err = bpf_map__pin(map, buf);
1609		if (err)
1610			return err;
1611	}
1612
1613	bpf_object__for_each_program(prog, obj) {
1614		char buf[PATH_MAX];
1615		int len;
1616
1617		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1618			       prog->section_name);
1619		if (len < 0)
1620			return -EINVAL;
1621		else if (len >= PATH_MAX)
1622			return -ENAMETOOLONG;
1623
1624		err = bpf_program__pin(prog, buf);
1625		if (err)
1626			return err;
1627	}
1628
1629	return 0;
1630}
1631
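/* Tear down 'obj': run private-data destructors, release ELF state,
 * unload programs, free map and program storage, and unlink the object
 * from the global objects list.
 */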
1632void bpf_object__close(struct bpf_object *obj)
1633{
1634	size_t i;
1635
1636	if (!obj)
1637		return;
1638
1639	if (obj->clear_priv)
1640		obj->clear_priv(obj, obj->priv);
1641
1642	bpf_object__elf_finish(obj);
1643	bpf_object__unload(obj);
1644
1645	for (i = 0; i < obj->nr_maps; i++) {
1646		zfree(&obj->maps[i].name);
1647		if (obj->maps[i].clear_priv)
1648			obj->maps[i].clear_priv(&obj->maps[i],
1649						obj->maps[i].priv);
1650		obj->maps[i].priv = NULL;
1651		obj->maps[i].clear_priv = NULL;
1652	}
1653	zfree(&obj->maps);
1654	obj->nr_maps = 0;
1655
1656	if (obj->programs && obj->nr_programs) {
1657		for (i = 0; i < obj->nr_programs; i++)
1658			bpf_program__exit(&obj->programs[i]);
1659	}
1660	zfree(&obj->programs);
1661
1662	list_del(&obj->list);
1663	free(obj);
1664}
1665
1666struct bpf_object *
1667bpf_object__next(struct bpf_object *prev)
1668{
1669	struct bpf_object *next;
1670
1671	if (!prev)
1672		next = list_first_entry(&bpf_objects_list,
1673					struct bpf_object,
1674					list);
1675	else
1676		next = list_next_entry(prev, list);
1677
1678	/* An empty list is detected here, so there is no need to check on entry. */
1679	if (&next->list == &bpf_objects_list)
1680		return NULL;
1681
1682	return next;
1683}
1684
1685const char *bpf_object__name(struct bpf_object *obj)
1686{
1687	return obj ? obj->path : ERR_PTR(-EINVAL);
1688}
1689
1690unsigned int bpf_object__kversion(struct bpf_object *obj)
1691{
1692	return obj ? obj->kern_version : 0;
1693}
1694
1695int bpf_object__set_priv(struct bpf_object *obj, void *priv,
1696			 bpf_object_clear_priv_t clear_priv)
1697{
1698	if (obj->priv && obj->clear_priv)
1699		obj->clear_priv(obj, obj->priv);
1700
1701	obj->priv = priv;
1702	obj->clear_priv = clear_priv;
1703	return 0;
1704}
1705
1706void *bpf_object__priv(struct bpf_object *obj)
1707{
1708	return obj ? obj->priv : ERR_PTR(-EINVAL);
1709}
1710
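/* Iterate over the programs of 'obj'; pass NULL as 'prev' to get the
 * first one. This backs the bpf_object__for_each_program() macro.
 */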
1711struct bpf_program *
1712bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1713{
1714	size_t idx;
1715
1716	if (!obj->programs)
1717		return NULL;
1718	/* First handler */
1719	if (prev == NULL)
1720		return &obj->programs[0];
1721
1722	if (prev->obj != obj) {
1723		pr_warning("error: program handler doesn't match object\n");
1724		return NULL;
1725	}
1726
1727	idx = (prev - obj->programs) + 1;
1728	if (idx >= obj->nr_programs)
1729		return NULL;
1730	return &obj->programs[idx];
1731}
1732
1733int bpf_program__set_priv(struct bpf_program *prog, void *priv,
1734			  bpf_program_clear_priv_t clear_priv)
1735{
1736	if (prog->priv && prog->clear_priv)
1737		prog->clear_priv(prog, prog->priv);
1738
1739	prog->priv = priv;
1740	prog->clear_priv = clear_priv;
1741	return 0;
1742}
1743
1744void *bpf_program__priv(struct bpf_program *prog)
1745{
1746	return prog ? prog->priv : ERR_PTR(-EINVAL);
1747}
1748
1749const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
1750{
1751	const char *title;
1752
1753	title = prog->section_name;
1754	if (needs_copy) {
1755		title = strdup(title);
1756		if (!title) {
1757			pr_warning("failed to strdup program title\n");
1758			return ERR_PTR(-ENOMEM);
1759		}
1760	}
1761
1762	return title;
1763}
1764
1765int bpf_program__fd(struct bpf_program *prog)
1766{
1767	return bpf_program__nth_fd(prog, 0);
1768}
1769
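/* Register a pre-processor for 'prog': at load time it is invoked once
 * per instance and may rewrite the instructions, so one ELF program can
 * be loaded as 'nr_instances' specialized copies. Their fds are later
 * fetched with bpf_program__nth_fd(). Must be called before loading.
 */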
1770int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1771			  bpf_program_prep_t prep)
1772{
1773	int *instances_fds;
1774
1775	if (nr_instances <= 0 || !prep)
1776		return -EINVAL;
1777
1778	if (prog->instances.nr > 0 || prog->instances.fds) {
1779		pr_warning("Can't set pre-processor after loading\n");
1780		return -EINVAL;
1781	}
1782
1783	instances_fds = malloc(sizeof(int) * nr_instances);
1784	if (!instances_fds) {
1785		pr_warning("alloc memory failed for fds\n");
1786		return -ENOMEM;
1787	}
1788
1789	/* fill all fds with -1 */
1790	memset(instances_fds, -1, sizeof(int) * nr_instances);
1791
1792	prog->instances.nr = nr_instances;
1793	prog->instances.fds = instances_fds;
1794	prog->preprocessor = prep;
1795	return 0;
1796}
1797
1798int bpf_program__nth_fd(struct bpf_program *prog, int n)
1799{
1800	int fd;
1801
1802	if (n >= prog->instances.nr || n < 0) {
1803		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
1804			   n, prog->section_name, prog->instances.nr);
1805		return -EINVAL;
1806	}
1807
1808	fd = prog->instances.fds[n];
1809	if (fd < 0) {
1810		pr_warning("%dth instance of program '%s' is invalid\n",
1811			   n, prog->section_name);
1812		return -ENOENT;
1813	}
1814
1815	return fd;
1816}
1817
1818void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
1819{
1820	prog->type = type;
1821}
1822
1823static bool bpf_program__is_type(struct bpf_program *prog,
1824				 enum bpf_prog_type type)
1825{
1826	return prog ? (prog->type == type) : false;
1827}
1828
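/* Generate bpf_program__set_<NAME>() and bpf_program__is_<NAME>()
 * helpers for each supported program type, e.g.
 * bpf_program__set_xdp() and bpf_program__is_xdp().
 */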
1829#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
1830int bpf_program__set_##NAME(struct bpf_program *prog)	\
1831{							\
1832	if (!prog)					\
1833		return -EINVAL;				\
1834	bpf_program__set_type(prog, TYPE);		\
1835	return 0;					\
1836}							\
1837							\
1838bool bpf_program__is_##NAME(struct bpf_program *prog)	\
1839{							\
1840	return bpf_program__is_type(prog, TYPE);	\
1841}							\
1842
1843BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
1844BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
1845BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
1846BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
1847BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
1848BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
1849BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
1850
1851static void bpf_program__set_expected_attach_type(struct bpf_program *prog,
1852						 enum bpf_attach_type type)
1853{
1854	prog->expected_attach_type = type;
1855}
1856
1857#define BPF_PROG_SEC_FULL(string, ptype, atype) \
1858	{ string, sizeof(string) - 1, ptype, atype }
1859
1860#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)
1861
1862#define BPF_SA_PROG_SEC(string, ptype) \
1863	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)
1864
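/* Map ELF section-name prefixes (as written with SEC() in BPF source)
 * to program types and, for cgroup sock_addr programs, expected attach
 * types; bpf_program__identify_section() matches by prefix.
 */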
1865static const struct {
1866	const char *sec;
1867	size_t len;
1868	enum bpf_prog_type prog_type;
1869	enum bpf_attach_type expected_attach_type;
1870} section_names[] = {
1871	BPF_PROG_SEC("socket",		BPF_PROG_TYPE_SOCKET_FILTER),
1872	BPF_PROG_SEC("kprobe/",		BPF_PROG_TYPE_KPROBE),
1873	BPF_PROG_SEC("kretprobe/",	BPF_PROG_TYPE_KPROBE),
1874	BPF_PROG_SEC("classifier",	BPF_PROG_TYPE_SCHED_CLS),
1875	BPF_PROG_SEC("action",		BPF_PROG_TYPE_SCHED_ACT),
1876	BPF_PROG_SEC("tracepoint/",	BPF_PROG_TYPE_TRACEPOINT),
1877	BPF_PROG_SEC("xdp",		BPF_PROG_TYPE_XDP),
1878	BPF_PROG_SEC("perf_event",	BPF_PROG_TYPE_PERF_EVENT),
1879	BPF_PROG_SEC("cgroup/skb",	BPF_PROG_TYPE_CGROUP_SKB),
1880	BPF_PROG_SEC("cgroup/sock",	BPF_PROG_TYPE_CGROUP_SOCK),
1881	BPF_PROG_SEC("cgroup/dev",	BPF_PROG_TYPE_CGROUP_DEVICE),
1882	BPF_PROG_SEC("lwt_in",		BPF_PROG_TYPE_LWT_IN),
1883	BPF_PROG_SEC("lwt_out",		BPF_PROG_TYPE_LWT_OUT),
1884	BPF_PROG_SEC("lwt_xmit",	BPF_PROG_TYPE_LWT_XMIT),
1885	BPF_PROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS),
1886	BPF_PROG_SEC("sk_skb",		BPF_PROG_TYPE_SK_SKB),
1887	BPF_PROG_SEC("sk_msg",		BPF_PROG_TYPE_SK_MSG),
1888	BPF_SA_PROG_SEC("cgroup/bind4",	BPF_CGROUP_INET4_BIND),
1889	BPF_SA_PROG_SEC("cgroup/bind6",	BPF_CGROUP_INET6_BIND),
1890	BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
1891	BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
1892};
1893
1894#undef BPF_PROG_SEC
1895#undef BPF_PROG_SEC_FULL
1896#undef BPF_SA_PROG_SEC
1897
1898static int bpf_program__identify_section(struct bpf_program *prog)
1899{
1900	int i;
1901
1902	if (!prog->section_name)
1903		goto err;
1904
1905	for (i = 0; i < ARRAY_SIZE(section_names); i++)
1906		if (strncmp(prog->section_name, section_names[i].sec,
1907			    section_names[i].len) == 0)
1908			return i;
1909
1910err:
1911	pr_warning("failed to guess program type based on section name %s\n",
1912		   prog->section_name);
1913
1914	return -1;
1915}
1916
1917int bpf_map__fd(struct bpf_map *map)
1918{
1919	return map ? map->fd : -EINVAL;
1920}
1921
1922const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
1923{
1924	return map ? &map->def : ERR_PTR(-EINVAL);
1925}
1926
1927const char *bpf_map__name(struct bpf_map *map)
1928{
1929	return map ? map->name : NULL;
1930}
1931
1932int bpf_map__set_priv(struct bpf_map *map, void *priv,
1933		     bpf_map_clear_priv_t clear_priv)
1934{
1935	if (!map)
1936		return -EINVAL;
1937
1938	if (map->priv) {
1939		if (map->clear_priv)
1940			map->clear_priv(map, map->priv);
1941	}
1942
1943	map->priv = priv;
1944	map->clear_priv = clear_priv;
1945	return 0;
1946}
1947
1948void *bpf_map__priv(struct bpf_map *map)
1949{
1950	return map ? map->priv : ERR_PTR(-EINVAL);
1951}
1952
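/* Iterate over the maps of 'obj'; pass NULL as 'prev' to get the first
 * map. This backs the bpf_map__for_each() macro.
 */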
1953struct bpf_map *
1954bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
1955{
1956	size_t idx;
1957	struct bpf_map *s, *e;
1958
1959	if (!obj || !obj->maps)
1960		return NULL;
1961
1962	s = obj->maps;
1963	e = obj->maps + obj->nr_maps;
1964
1965	if (prev == NULL)
1966		return s;
1967
1968	if ((prev < s) || (prev >= e)) {
1969		pr_warning("error in %s: map handler doesn't belong to object\n",
1970			   __func__);
1971		return NULL;
1972	}
1973
1974	idx = (prev - obj->maps) + 1;
1975	if (idx >= obj->nr_maps)
1976		return NULL;
1977	return &obj->maps[idx];
1978}
1979
1980struct bpf_map *
1981bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
1982{
1983	struct bpf_map *pos;
1984
1985	bpf_map__for_each(pos, obj) {
1986		if (pos->name && !strcmp(pos->name, name))
1987			return pos;
1988	}
1989	return NULL;
1990}
1991
1992struct bpf_map *
1993bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
1994{
1995	int i;
1996
1997	for (i = 0; i < obj->nr_maps; i++) {
1998		if (obj->maps[i].offset == offset)
1999			return &obj->maps[i];
2000	}
2001	return ERR_PTR(-ENOENT);
2002}
2003
2004long libbpf_get_error(const void *ptr)
2005{
2006	if (IS_ERR(ptr))
2007		return PTR_ERR(ptr);
2008	return 0;
2009}
2010
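/* Convenience wrapper around bpf_prog_load_xattr() that opens and loads
 * 'file' and returns the new object plus the fd of its first program.
 *
 * Minimal usage sketch (editor's illustration; the file name is
 * hypothetical):
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *	int err = bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP,
 *				&obj, &prog_fd);
 *
 * On success (err == 0), prog_fd is ready to be attached, e.g. to a
 * network device for XDP.
 */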
2011int bpf_prog_load(const char *file, enum bpf_prog_type type,
2012		  struct bpf_object **pobj, int *prog_fd)
2013{
2014	struct bpf_prog_load_attr attr;
2015
2016	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2017	attr.file = file;
2018	attr.prog_type = type;
2019	attr.expected_attach_type = 0;
2020
2021	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2022}
2023
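/* Open attr->file, set each program's type and expected attach type
 * (guessed from the section name when the caller passes
 * BPF_PROG_TYPE_UNSPEC), load everything, and return the fd of the
 * first non-.text program.
 */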
2024int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2025			struct bpf_object **pobj, int *prog_fd)
2026{
2027	struct bpf_program *prog, *first_prog = NULL;
2028	enum bpf_attach_type expected_attach_type;
2029	enum bpf_prog_type prog_type;
2030	struct bpf_object *obj;
2031	int section_idx;
2032	int err;
2033
2034	if (!attr)
2035		return -EINVAL;
2036
2037	obj = bpf_object__open(attr->file);
2038	if (IS_ERR_OR_NULL(obj))
2039		return -ENOENT;
2040
2041	bpf_object__for_each_program(prog, obj) {
2042		/*
2043		 * If type is not specified, try to guess it based on
2044		 * section name.
2045		 */
2046		prog_type = attr->prog_type;
2047		expected_attach_type = attr->expected_attach_type;
2048		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2049			section_idx = bpf_program__identify_section(prog);
2050			if (section_idx < 0) {
2051				bpf_object__close(obj);
2052				return -EINVAL;
2053			}
2054			prog_type = section_names[section_idx].prog_type;
2055			expected_attach_type =
2056				section_names[section_idx].expected_attach_type;
2057		}
2058
2059		bpf_program__set_type(prog, prog_type);
2060		bpf_program__set_expected_attach_type(prog,
2061						      expected_attach_type);
2062
2063		if (prog->idx != obj->efile.text_shndx && !first_prog)
2064			first_prog = prog;
2065	}
2066
2067	if (!first_prog) {
2068		pr_warning("object file doesn't contain bpf program\n");
2069		bpf_object__close(obj);
2070		return -ENOENT;
2071	}
2072
2073	err = bpf_object__load(obj);
2074	if (err) {
2075		bpf_object__close(obj);
2076		return -EINVAL;
2077	}
2078
2079	*pobj = obj;
2080	*prog_fd = bpf_program__fd(first_prog);
2081	return 0;
2082}
v5.14.15
    1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
    2
    3/*
    4 * Common eBPF ELF object loading operations.
    5 *
    6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
    7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
    8 * Copyright (C) 2015 Huawei Inc.
    9 * Copyright (C) 2017 Nicira, Inc.
   10 * Copyright (C) 2019 Isovalent, Inc.
   11 */
   12
   13#ifndef _GNU_SOURCE
   14#define _GNU_SOURCE
   15#endif
   16#include <stdlib.h>
   17#include <stdio.h>
   18#include <stdarg.h>
   19#include <libgen.h>
   20#include <inttypes.h>
   21#include <limits.h>
   22#include <string.h>
   23#include <unistd.h>
   24#include <endian.h>
   25#include <fcntl.h>
   26#include <errno.h>
   27#include <ctype.h>
   28#include <asm/unistd.h>
   29#include <linux/err.h>
   30#include <linux/kernel.h>
   31#include <linux/bpf.h>
   32#include <linux/btf.h>
   33#include <linux/filter.h>
   34#include <linux/list.h>
   35#include <linux/limits.h>
   36#include <linux/perf_event.h>
   37#include <linux/ring_buffer.h>
   38#include <linux/version.h>
   39#include <sys/epoll.h>
   40#include <sys/ioctl.h>
   41#include <sys/mman.h>
   42#include <sys/stat.h>
   43#include <sys/types.h>
   44#include <sys/vfs.h>
   45#include <sys/utsname.h>
   46#include <sys/resource.h>
   47#include <libelf.h>
   48#include <gelf.h>
   49#include <zlib.h>
   50
   51#include "libbpf.h"
   52#include "bpf.h"
   53#include "btf.h"
   54#include "str_error.h"
   55#include "libbpf_internal.h"
   56#include "hashmap.h"
   57#include "bpf_gen_internal.h"
   58
   59#ifndef BPF_FS_MAGIC
   60#define BPF_FS_MAGIC		0xcafe4a11
   61#endif
   62
   63#define BPF_INSN_SZ (sizeof(struct bpf_insn))
   64
   65/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
   66 * compilation if the user enables the corresponding warning. Disable it explicitly.
   67 */
   68#pragma GCC diagnostic ignored "-Wformat-nonliteral"
   69
   70#define __printf(a, b)	__attribute__((format(printf, a, b)))
   71
   72static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
   73static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
   74
   75static int __base_pr(enum libbpf_print_level level, const char *format,
   76		     va_list args)
   77{
   78	if (level == LIBBPF_DEBUG)
   79		return 0;
   80
   81	return vfprintf(stderr, format, args);
   82}
   83
   84static libbpf_print_fn_t __libbpf_pr = __base_pr;
   85
   86libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
   87{
   88	libbpf_print_fn_t old_print_fn = __libbpf_pr;
   89
   90	__libbpf_pr = fn;
   91	return old_print_fn;
   92}
   93
   94__printf(2, 3)
   95void libbpf_print(enum libbpf_print_level level, const char *format, ...)
   96{
   97	va_list args;
   98
   99	if (!__libbpf_pr)
  100		return;
  101
  102	va_start(args, format);
  103	__libbpf_pr(level, format, args);
  104	va_end(args);
  105}
  106
  107static void pr_perm_msg(int err)
  108{
  109	struct rlimit limit;
  110	char buf[100];
  111
  112	if (err != -EPERM || geteuid() != 0)
  113		return;
  114
  115	err = getrlimit(RLIMIT_MEMLOCK, &limit);
  116	if (err)
  117		return;
  118
  119	if (limit.rlim_cur == RLIM_INFINITY)
  120		return;
  121
  122	if (limit.rlim_cur < 1024)
  123		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
  124	else if (limit.rlim_cur < 1024*1024)
  125		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
  126	else
  127		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
  128
  129	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
  130		buf);
  131}
  132
  133#define STRERR_BUFSIZE  128
  134
  135/* Copied from tools/perf/util/util.h */
  136#ifndef zfree
  137# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
  138#endif
  139
  140#ifndef zclose
  141# define zclose(fd) ({			\
  142	int ___err = 0;			\
  143	if ((fd) >= 0)			\
  144		___err = close((fd));	\
  145	fd = -1;			\
  146	___err; })
  147#endif
  148
  149static inline __u64 ptr_to_u64(const void *ptr)
  150{
  151	return (__u64) (unsigned long) ptr;
  152}
  153
  154/* this goes away in libbpf 1.0 */
  155enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;
  156
  157int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
  158{
  159	/* __LIBBPF_STRICT_LAST is the last power-of-2 value used + 1, so to
  160	 * get a mask of all possible values we first compensate for that +1,
  161	 * and then compute (2*x - 1) to get the bit mask
  162	 */
  163	if (mode != LIBBPF_STRICT_ALL
  164	    && (mode & ~((__LIBBPF_STRICT_LAST - 1) * 2 - 1)))
  165		return errno = EINVAL, -EINVAL;
  166
  167	libbpf_mode = mode;
  168	return 0;
  169}
  170
  171enum kern_feature_id {
  172	/* v4.14: kernel support for program & map names. */
  173	FEAT_PROG_NAME,
  174	/* v5.2: kernel support for global data sections. */
  175	FEAT_GLOBAL_DATA,
  176	/* BTF support */
  177	FEAT_BTF,
  178	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
  179	FEAT_BTF_FUNC,
  180	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
  181	FEAT_BTF_DATASEC,
  182	/* BTF_FUNC_GLOBAL is supported */
  183	FEAT_BTF_GLOBAL_FUNC,
  184	/* BPF_F_MMAPABLE is supported for arrays */
  185	FEAT_ARRAY_MMAP,
  186	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
  187	FEAT_EXP_ATTACH_TYPE,
  188	/* bpf_probe_read_{kernel,user}[_str] helpers */
  189	FEAT_PROBE_READ_KERN,
  190	/* BPF_PROG_BIND_MAP is supported */
  191	FEAT_PROG_BIND_MAP,
  192	/* Kernel support for module BTFs */
  193	FEAT_MODULE_BTF,
  194	/* BTF_KIND_FLOAT support */
  195	FEAT_BTF_FLOAT,
  196	__FEAT_CNT,
  197};
  198
  199static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
  200
  201enum reloc_type {
  202	RELO_LD64,
  203	RELO_CALL,
  204	RELO_DATA,
  205	RELO_EXTERN_VAR,
  206	RELO_EXTERN_FUNC,
  207	RELO_SUBPROG_ADDR,
  208};
  209
  210struct reloc_desc {
  211	enum reloc_type type;
  212	int insn_idx;
  213	int map_idx;
  214	int sym_off;
  215};
  216
  217struct bpf_sec_def;
  218
  219typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
  220					struct bpf_program *prog);
  221
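/* Describes how one SEC() name maps to a program type, an expected
 * attach type, and (optionally) an auto-attach handler.
 */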
  222struct bpf_sec_def {
  223	const char *sec;
  224	size_t len;
  225	enum bpf_prog_type prog_type;
  226	enum bpf_attach_type expected_attach_type;
  227	bool is_exp_attach_type_optional;
  228	bool is_attachable;
  229	bool is_attach_btf;
  230	bool is_sleepable;
  231	attach_fn_t attach_fn;
  232};
  233
  234/*
  235 * bpf_prog should be a better name but it has been used in
  236 * linux/filter.h.
  237 */
  238struct bpf_program {
  239	const struct bpf_sec_def *sec_def;
  240	char *sec_name;
  241	size_t sec_idx;
  242	/* this program's instruction offset (in number of instructions)
  243	 * within its containing ELF section
  244	 */
  245	size_t sec_insn_off;
  246	/* number of original instructions in ELF section belonging to this
  247	 * program, not taking into account subprogram instructions possibly
  248	 * appended later during relocation
  249	 */
  250	size_t sec_insn_cnt;
  251	/* Offset (in number of instructions) of the start of instructions
  252	 * belonging to this BPF program within its containing main BPF
  253	 * program. For the entry-point (main) BPF program, this is always
  254	 * zero. For a sub-program, this gets reset before each main BPF
  255	 * program is processed and relocated, and is used to determine
  256	 * whether the sub-program was already appended to the main program,
  257	 * and if so, at which instruction offset.
  258	 */
  259	size_t sub_insn_off;
  260
  261	char *name;
  262	/* sec_name with / replaced by _; makes recursive pinning
  263	 * in bpf_object__pin_programs easier
  264	 */
  265	char *pin_name;
  266
  267	/* instructions that belong to BPF program; insns[0] is located at
  268	 * sec_insn_off instruction within its ELF section in ELF file, so
  269	 * when mapping ELF file instruction index to the local instruction,
  270	 * one needs to subtract sec_insn_off; and vice versa.
  271	 */
  272	struct bpf_insn *insns;
  273	/* actual number of instructions in this BPF program's image; for
  274	 * entry-point BPF programs this includes the size of main program
  275	 * itself plus all the used sub-programs, appended at the end
  276	 */
  277	size_t insns_cnt;
  278
  279	struct reloc_desc *reloc_desc;
  280	int nr_reloc;
  281	int log_level;
  282
  283	struct {
  284		int nr;
  285		int *fds;
  286	} instances;
  287	bpf_program_prep_t preprocessor;
  288
  289	struct bpf_object *obj;
  290	void *priv;
  291	bpf_program_clear_priv_t clear_priv;
  292
  293	bool load;
  294	bool mark_btf_static;
  295	enum bpf_prog_type type;
  296	enum bpf_attach_type expected_attach_type;
  297	int prog_ifindex;
  298	__u32 attach_btf_obj_fd;
  299	__u32 attach_btf_id;
  300	__u32 attach_prog_fd;
  301	void *func_info;
  302	__u32 func_info_rec_size;
  303	__u32 func_info_cnt;
  304
  305	void *line_info;
  306	__u32 line_info_rec_size;
  307	__u32 line_info_cnt;
  308	__u32 prog_flags;
  309};
  310
  311struct bpf_struct_ops {
  312	const char *tname;
  313	const struct btf_type *type;
  314	struct bpf_program **progs;
  315	__u32 *kern_func_off;
  316	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
  317	void *data;
  318	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
  319	 *      btf_vmlinux's format.
  320	 * struct bpf_struct_ops_tcp_congestion_ops {
  321	 *	[... some other kernel fields ...]
  322	 *	struct tcp_congestion_ops data;
  323	 * }
  324	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
  325	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
  326	 * from "data".
  327	 */
  328	void *kern_vdata;
  329	__u32 type_id;
  330};
  331
  332#define DATA_SEC ".data"
  333#define BSS_SEC ".bss"
  334#define RODATA_SEC ".rodata"
  335#define KCONFIG_SEC ".kconfig"
  336#define KSYMS_SEC ".ksyms"
  337#define STRUCT_OPS_SEC ".struct_ops"
  338
  339enum libbpf_map_type {
  340	LIBBPF_MAP_UNSPEC,
  341	LIBBPF_MAP_DATA,
  342	LIBBPF_MAP_BSS,
  343	LIBBPF_MAP_RODATA,
  344	LIBBPF_MAP_KCONFIG,
  345};
  346
  347static const char * const libbpf_type_to_btf_name[] = {
  348	[LIBBPF_MAP_DATA]	= DATA_SEC,
  349	[LIBBPF_MAP_BSS]	= BSS_SEC,
  350	[LIBBPF_MAP_RODATA]	= RODATA_SEC,
  351	[LIBBPF_MAP_KCONFIG]	= KCONFIG_SEC,
  352};
  353
  354struct bpf_map {
  355	char *name;
  356	int fd;
  357	int sec_idx;
  358	size_t sec_offset;
  359	int map_ifindex;
  360	int inner_map_fd;
  361	struct bpf_map_def def;
  362	__u32 numa_node;
  363	__u32 btf_var_idx;
  364	__u32 btf_key_type_id;
  365	__u32 btf_value_type_id;
  366	__u32 btf_vmlinux_value_type_id;
  367	void *priv;
  368	bpf_map_clear_priv_t clear_priv;
  369	enum libbpf_map_type libbpf_type;
  370	void *mmaped;
  371	struct bpf_struct_ops *st_ops;
  372	struct bpf_map *inner_map;
  373	void **init_slots;
  374	int init_slots_sz;
  375	char *pin_path;
  376	bool pinned;
  377	bool reused;
  378};
  379
  380enum extern_type {
  381	EXT_UNKNOWN,
  382	EXT_KCFG,
  383	EXT_KSYM,
  384};
  385
  386enum kcfg_type {
  387	KCFG_UNKNOWN,
  388	KCFG_CHAR,
  389	KCFG_BOOL,
  390	KCFG_INT,
  391	KCFG_TRISTATE,
  392	KCFG_CHAR_ARR,
  393};
  394
  395struct extern_desc {
  396	enum extern_type type;
  397	int sym_idx;
  398	int btf_id;
  399	int sec_btf_id;
  400	const char *name;
  401	bool is_set;
  402	bool is_weak;
  403	union {
  404		struct {
  405			enum kcfg_type type;
  406			int sz;
  407			int align;
  408			int data_off;
  409			bool is_signed;
  410		} kcfg;
  411		struct {
  412			unsigned long long addr;
  413
  414			/* target btf_id of the corresponding kernel var. */
  415			int kernel_btf_obj_fd;
  416			int kernel_btf_id;
  417
  418			/* local btf_id of the ksym extern's type. */
  419			__u32 type_id;
  420		} ksym;
  421	};
  422};
  423
  424static LIST_HEAD(bpf_objects_list);
  425
  426struct module_btf {
  427	struct btf *btf;
  428	char *name;
  429	__u32 id;
  430	int fd;
  431};
  432
  433struct bpf_object {
  434	char name[BPF_OBJ_NAME_LEN];
  435	char license[64];
  436	__u32 kern_version;
  437
  438	struct bpf_program *programs;
  439	size_t nr_programs;
  440	struct bpf_map *maps;
  441	size_t nr_maps;
  442	size_t maps_cap;
  443
  444	char *kconfig;
  445	struct extern_desc *externs;
  446	int nr_extern;
  447	int kconfig_map_idx;
  448	int rodata_map_idx;
  449
  450	bool loaded;
  451	bool has_subcalls;
  452
  453	struct bpf_gen *gen_loader;
  454
  455	/*
  456	 * Information used when doing ELF-related work. Only valid if fd
  457	 * is valid.
  458	 */
  459	struct {
  460		int fd;
  461		const void *obj_buf;
  462		size_t obj_buf_sz;
  463		Elf *elf;
  464		GElf_Ehdr ehdr;
  465		Elf_Data *symbols;
  466		Elf_Data *data;
  467		Elf_Data *rodata;
  468		Elf_Data *bss;
  469		Elf_Data *st_ops_data;
  470		size_t shstrndx; /* section index for section name strings */
  471		size_t strtabidx;
  472		struct {
  473			GElf_Shdr shdr;
  474			Elf_Data *data;
  475		} *reloc_sects;
  476		int nr_reloc_sects;
  477		int maps_shndx;
  478		int btf_maps_shndx;
  479		__u32 btf_maps_sec_btf_id;
  480		int text_shndx;
  481		int symbols_shndx;
  482		int data_shndx;
  483		int rodata_shndx;
  484		int bss_shndx;
  485		int st_ops_shndx;
  486	} efile;
  487	/*
  488	 * All loaded bpf_objects are linked in a list, which is
  489	 * hidden from the caller. bpf_objects__<func> handlers deal with
  490	 * all objects.
  491	 */
  492	struct list_head list;
  493
  494	struct btf *btf;
  495	struct btf_ext *btf_ext;
  496
  497	/* Parse and load BTF vmlinux if any of the programs in the object need
  498	 * it at load time.
  499	 */
  500	struct btf *btf_vmlinux;
  501	/* vmlinux BTF override for CO-RE relocations */
  502	struct btf *btf_vmlinux_override;
  503	/* Lazily initialized kernel module BTFs */
  504	struct module_btf *btf_modules;
  505	bool btf_modules_loaded;
  506	size_t btf_module_cnt;
  507	size_t btf_module_cap;
  508
  509	void *priv;
  510	bpf_object_clear_priv_t clear_priv;
  511
  512	char path[];
  513};
  514#define obj_elf_valid(o)	((o)->efile.elf)
  515
  516static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
  517static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
  518static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
  519static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
  520static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
  521static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
  522static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
  523
  524void bpf_program__unload(struct bpf_program *prog)
  525{
  526	int i;
  527
  528	if (!prog)
  529		return;
  530
  531	/*
  532	 * If the object is opened but the program was never loaded,
  533	 * it is possible that prog->instances.nr == -1.
  534	 */
  535	if (prog->instances.nr > 0) {
  536		for (i = 0; i < prog->instances.nr; i++)
  537			zclose(prog->instances.fds[i]);
  538	} else if (prog->instances.nr != -1) {
  539		pr_warn("Internal error: instances.nr is %d\n",
  540			prog->instances.nr);
  541	}
  542
  543	prog->instances.nr = -1;
  544	zfree(&prog->instances.fds);
  545
  546	zfree(&prog->func_info);
  547	zfree(&prog->line_info);
  548}
  549
  550static void bpf_program__exit(struct bpf_program *prog)
  551{
  552	if (!prog)
  553		return;
  554
  555	if (prog->clear_priv)
  556		prog->clear_priv(prog, prog->priv);
  557
  558	prog->priv = NULL;
  559	prog->clear_priv = NULL;
  560
  561	bpf_program__unload(prog);
  562	zfree(&prog->name);
  563	zfree(&prog->sec_name);
  564	zfree(&prog->pin_name);
  565	zfree(&prog->insns);
  566	zfree(&prog->reloc_desc);
  567
  568	prog->nr_reloc = 0;
  569	prog->insns_cnt = 0;
  570	prog->sec_idx = -1;
  571}
  572
  573static char *__bpf_program__pin_name(struct bpf_program *prog)
  574{
  575	char *name, *p;
  576
  577	name = p = strdup(prog->sec_name);
  578	while ((p = strchr(p, '/')))
  579		*p = '_';
  580
  581	return name;
  582}
  583
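/* A sub-program call is a BPF_CALL instruction whose src_reg is
 * BPF_PSEUDO_CALL, i.e. a call to another BPF function rather than to a
 * kernel helper.
 */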
  584static bool insn_is_subprog_call(const struct bpf_insn *insn)
  585{
  586	return BPF_CLASS(insn->code) == BPF_JMP &&
  587	       BPF_OP(insn->code) == BPF_CALL &&
  588	       BPF_SRC(insn->code) == BPF_K &&
  589	       insn->src_reg == BPF_PSEUDO_CALL &&
  590	       insn->dst_reg == 0 &&
  591	       insn->off == 0;
  592}
  593
  594static bool is_ldimm64_insn(struct bpf_insn *insn)
  595{
  596	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
  597}
  598
  599static bool is_call_insn(const struct bpf_insn *insn)
  600{
  601	return insn->code == (BPF_JMP | BPF_CALL);
  602}
  603
  604static bool insn_is_pseudo_func(struct bpf_insn *insn)
  605{
  606	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
  607}
  608
  609static int
  610bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
  611		      const char *name, size_t sec_idx, const char *sec_name,
  612		      size_t sec_off, void *insn_data, size_t insn_data_sz)
  613{
  614	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
  615		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
  616			sec_name, name, sec_off, insn_data_sz);
  617		return -EINVAL;
  618	}
  619
  620	memset(prog, 0, sizeof(*prog));
  621	prog->obj = obj;
  622
  623	prog->sec_idx = sec_idx;
  624	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
  625	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
  626	/* insns_cnt can later be increased by appending used subprograms */
  627	prog->insns_cnt = prog->sec_insn_cnt;
  628
  629	prog->type = BPF_PROG_TYPE_UNSPEC;
  630	prog->load = true;
  631
  632	prog->instances.fds = NULL;
  633	prog->instances.nr = -1;
  634
  635	prog->sec_name = strdup(sec_name);
  636	if (!prog->sec_name)
  637		goto errout;
  638
  639	prog->name = strdup(name);
  640	if (!prog->name)
  641		goto errout;
  642
  643	prog->pin_name = __bpf_program__pin_name(prog);
  644	if (!prog->pin_name)
  645		goto errout;
  646
  647	prog->insns = malloc(insn_data_sz);
  648	if (!prog->insns)
  649		goto errout;
  650	memcpy(prog->insns, insn_data, insn_data_sz);
  651
  652	return 0;
  653errout:
  654	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
  655	bpf_program__exit(prog);
  656	return -ENOMEM;
  657}
  658
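/* Walk the ELF symbol table and create one bpf_program for every
 * STT_FUNC symbol defined in section 'sec_idx'.
 */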
  659static int
  660bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
  661			 const char *sec_name, int sec_idx)
  662{
  663	Elf_Data *symbols = obj->efile.symbols;
  664	struct bpf_program *prog, *progs;
  665	void *data = sec_data->d_buf;
  666	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
  667	int nr_progs, err, i;
  668	const char *name;
  669	GElf_Sym sym;
  670
  671	progs = obj->programs;
  672	nr_progs = obj->nr_programs;
  673	nr_syms = symbols->d_size / sizeof(GElf_Sym);
  674	sec_off = 0;
  675
  676	for (i = 0; i < nr_syms; i++) {
  677		if (!gelf_getsym(symbols, i, &sym))
  678			continue;
  679		if (sym.st_shndx != sec_idx)
  680			continue;
  681		if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
  682			continue;
  683
  684		prog_sz = sym.st_size;
  685		sec_off = sym.st_value;
  686
  687		name = elf_sym_str(obj, sym.st_name);
  688		if (!name) {
  689			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
  690				sec_name, sec_off);
  691			return -LIBBPF_ERRNO__FORMAT;
  692		}
  693
  694		if (sec_off + prog_sz > sec_sz) {
  695			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
  696				sec_name, sec_off);
  697			return -LIBBPF_ERRNO__FORMAT;
  698		}
  699
  700		if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
  701			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
  702			return -ENOTSUP;
  703		}
  704
  705		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
  706			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
  707
  708		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
  709		if (!progs) {
  710			/*
  711			 * In this case the original obj->programs
  712			 * is still valid, so don't need special treat for
  713			 * bpf_close_object().
  714			 */
  715			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
  716				sec_name, name);
  717			return -ENOMEM;
  718		}
  719		obj->programs = progs;
  720
  721		prog = &progs[nr_progs];
  722
  723		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
  724					    sec_off, data + sec_off, prog_sz);
  725		if (err)
  726			return err;
  727
  728		/* if function is a global/weak symbol, but has restricted
  729		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
  730		 * as static to enable more permissive BPF verification mode
  731		 * with more outside context available to BPF verifier
  732		 */
  733		if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
  734		    && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
  735			|| GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
  736			prog->mark_btf_static = true;
  737
  738		nr_progs++;
  739		obj->nr_programs = nr_progs;
  740	}
  741
  742	return 0;
  743}
  744
  745static __u32 get_kernel_version(void)
  746{
  747	__u32 major, minor, patch;
  748	struct utsname info;
  749
  750	uname(&info);
  751	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
  752		return 0;
  753	return KERNEL_VERSION(major, minor, patch);
  754}
  755
  756static const struct btf_member *
  757find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
  758{
  759	struct btf_member *m;
  760	int i;
  761
  762	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
  763		if (btf_member_bit_offset(t, i) == bit_offset)
  764			return m;
  765	}
  766
  767	return NULL;
  768}
  769
  770static const struct btf_member *
  771find_member_by_name(const struct btf *btf, const struct btf_type *t,
  772		    const char *name)
  773{
  774	struct btf_member *m;
  775	int i;
  776
  777	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
  778		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
  779			return m;
  780	}
  781
  782	return NULL;
  783}
  784
  785#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
  786static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
  787				   const char *name, __u32 kind);
  788
  789static int
  790find_struct_ops_kern_types(const struct btf *btf, const char *tname,
  791			   const struct btf_type **type, __u32 *type_id,
  792			   const struct btf_type **vtype, __u32 *vtype_id,
  793			   const struct btf_member **data_member)
  794{
  795	const struct btf_type *kern_type, *kern_vtype;
  796	const struct btf_member *kern_data_member;
  797	__s32 kern_vtype_id, kern_type_id;
  798	__u32 i;
  799
  800	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
  801	if (kern_type_id < 0) {
  802		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
  803			tname);
  804		return kern_type_id;
  805	}
  806	kern_type = btf__type_by_id(btf, kern_type_id);
  807
  808	/* Find the corresponding "map_value" type that will be used
  809	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
  810	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
  811	 * btf_vmlinux.
  812	 */
  813	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
  814						tname, BTF_KIND_STRUCT);
  815	if (kern_vtype_id < 0) {
  816		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
  817			STRUCT_OPS_VALUE_PREFIX, tname);
  818		return kern_vtype_id;
  819	}
  820	kern_vtype = btf__type_by_id(btf, kern_vtype_id);
  821
  822	/* Find "struct tcp_congestion_ops" from
  823	 * struct bpf_struct_ops_tcp_congestion_ops {
  824	 *	[ ... ]
  825	 *	struct tcp_congestion_ops data;
  826	 * }
  827	 */
  828	kern_data_member = btf_members(kern_vtype);
  829	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
  830		if (kern_data_member->type == kern_type_id)
  831			break;
  832	}
  833	if (i == btf_vlen(kern_vtype)) {
  834		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
  835			tname, STRUCT_OPS_VALUE_PREFIX, tname);
  836		return -EINVAL;
  837	}
  838
  839	*type = kern_type;
  840	*type_id = kern_type_id;
  841	*vtype = kern_vtype;
  842	*vtype_id = kern_vtype_id;
  843	*data_member = kern_data_member;
  844
  845	return 0;
  846}
  847
  848static bool bpf_map__is_struct_ops(const struct bpf_map *map)
  849{
  850	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
  851}
  852
  853/* Init the map's fields that depend on kern_btf */
  854static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
  855					 const struct btf *btf,
  856					 const struct btf *kern_btf)
  857{
  858	const struct btf_member *member, *kern_member, *kern_data_member;
  859	const struct btf_type *type, *kern_type, *kern_vtype;
  860	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
  861	struct bpf_struct_ops *st_ops;
  862	void *data, *kern_data;
  863	const char *tname;
  864	int err;
  865
  866	st_ops = map->st_ops;
  867	type = st_ops->type;
  868	tname = st_ops->tname;
  869	err = find_struct_ops_kern_types(kern_btf, tname,
  870					 &kern_type, &kern_type_id,
  871					 &kern_vtype, &kern_vtype_id,
  872					 &kern_data_member);
  873	if (err)
  874		return err;
  875
  876	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
  877		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
  878
  879	map->def.value_size = kern_vtype->size;
  880	map->btf_vmlinux_value_type_id = kern_vtype_id;
  881
  882	st_ops->kern_vdata = calloc(1, kern_vtype->size);
  883	if (!st_ops->kern_vdata)
  884		return -ENOMEM;
  885
  886	data = st_ops->data;
  887	kern_data_off = kern_data_member->offset / 8;
  888	kern_data = st_ops->kern_vdata + kern_data_off;
  889
  890	member = btf_members(type);
  891	for (i = 0; i < btf_vlen(type); i++, member++) {
  892		const struct btf_type *mtype, *kern_mtype;
  893		__u32 mtype_id, kern_mtype_id;
  894		void *mdata, *kern_mdata;
  895		__s64 msize, kern_msize;
  896		__u32 moff, kern_moff;
  897		__u32 kern_member_idx;
  898		const char *mname;
  899
  900		mname = btf__name_by_offset(btf, member->name_off);
  901		kern_member = find_member_by_name(kern_btf, kern_type, mname);
  902		if (!kern_member) {
  903			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
  904				map->name, mname);
  905			return -ENOTSUP;
  906		}
  907
  908		kern_member_idx = kern_member - btf_members(kern_type);
  909		if (btf_member_bitfield_size(type, i) ||
  910		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
  911			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
  912				map->name, mname);
  913			return -ENOTSUP;
  914		}
  915
  916		moff = member->offset / 8;
  917		kern_moff = kern_member->offset / 8;
  918
  919		mdata = data + moff;
  920		kern_mdata = kern_data + kern_moff;
  921
  922		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
  923		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
  924						    &kern_mtype_id);
  925		if (BTF_INFO_KIND(mtype->info) !=
  926		    BTF_INFO_KIND(kern_mtype->info)) {
  927			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
  928				map->name, mname, BTF_INFO_KIND(mtype->info),
  929				BTF_INFO_KIND(kern_mtype->info));
  930			return -ENOTSUP;
  931		}
  932
  933		if (btf_is_ptr(mtype)) {
  934			struct bpf_program *prog;
  935
  936			prog = st_ops->progs[i];
  937			if (!prog)
  938				continue;
  939
  940			kern_mtype = skip_mods_and_typedefs(kern_btf,
  941							    kern_mtype->type,
  942							    &kern_mtype_id);
  943
  944			/* mtype->type must be a func_proto which was
  945			 * guaranteed in bpf_object__collect_st_ops_relos(),
  946			 * so only check kern_mtype for func_proto here.
  947			 */
  948			if (!btf_is_func_proto(kern_mtype)) {
  949				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
  950					map->name, mname);
  951				return -ENOTSUP;
  952			}
  953
  954			prog->attach_btf_id = kern_type_id;
  955			prog->expected_attach_type = kern_member_idx;
  956
  957			st_ops->kern_func_off[i] = kern_data_off + kern_moff;
  958
  959			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
  960				 map->name, mname, prog->name, moff,
  961				 kern_moff);
  962
  963			continue;
  964		}
  965
  966		msize = btf__resolve_size(btf, mtype_id);
  967		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
  968		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
  969			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
  970				map->name, mname, (ssize_t)msize,
  971				(ssize_t)kern_msize);
  972			return -ENOTSUP;
  973		}
  974
  975		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
  976			 map->name, mname, (unsigned int)msize,
  977			 moff, kern_moff);
  978		memcpy(kern_mdata, mdata, msize);
  979	}
  980
  981	return 0;
  982}
  983
  984static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
  985{
  986	struct bpf_map *map;
  987	size_t i;
  988	int err;
  989
  990	for (i = 0; i < obj->nr_maps; i++) {
  991		map = &obj->maps[i];
  992
  993		if (!bpf_map__is_struct_ops(map))
  994			continue;
  995
  996		err = bpf_map__init_kern_struct_ops(map, obj->btf,
  997						    obj->btf_vmlinux);
  998		if (err)
  999			return err;
 1000	}
 1001
 1002	return 0;
 1003}
 1004
 1005static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
 1006{
 1007	const struct btf_type *type, *datasec;
 1008	const struct btf_var_secinfo *vsi;
 1009	struct bpf_struct_ops *st_ops;
 1010	const char *tname, *var_name;
 1011	__s32 type_id, datasec_id;
 1012	const struct btf *btf;
 1013	struct bpf_map *map;
 1014	__u32 i;
 1015
 1016	if (obj->efile.st_ops_shndx == -1)
 1017		return 0;
 1018
 1019	btf = obj->btf;
 1020	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
 1021					    BTF_KIND_DATASEC);
 1022	if (datasec_id < 0) {
 1023		pr_warn("struct_ops init: DATASEC %s not found\n",
 1024			STRUCT_OPS_SEC);
 1025		return -EINVAL;
 1026	}
 1027
 1028	datasec = btf__type_by_id(btf, datasec_id);
 1029	vsi = btf_var_secinfos(datasec);
 1030	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
 1031		type = btf__type_by_id(obj->btf, vsi->type);
 1032		var_name = btf__name_by_offset(obj->btf, type->name_off);
 1033
 1034		type_id = btf__resolve_type(obj->btf, vsi->type);
 1035		if (type_id < 0) {
 1036			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
 1037				vsi->type, STRUCT_OPS_SEC);
 1038			return -EINVAL;
 1039		}
 1040
 1041		type = btf__type_by_id(obj->btf, type_id);
 1042		tname = btf__name_by_offset(obj->btf, type->name_off);
 1043		if (!tname[0]) {
 1044			pr_warn("struct_ops init: anonymous type is not supported\n");
 1045			return -ENOTSUP;
 1046		}
 1047		if (!btf_is_struct(type)) {
 1048			pr_warn("struct_ops init: %s is not a struct\n", tname);
 1049			return -EINVAL;
 1050		}
 1051
 1052		map = bpf_object__add_map(obj);
 1053		if (IS_ERR(map))
 1054			return PTR_ERR(map);
 1055
 1056		map->sec_idx = obj->efile.st_ops_shndx;
 1057		map->sec_offset = vsi->offset;
 1058		map->name = strdup(var_name);
 1059		if (!map->name)
 1060			return -ENOMEM;
 1061
 1062		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
 1063		map->def.key_size = sizeof(int);
 1064		map->def.value_size = type->size;
 1065		map->def.max_entries = 1;
 1066
 1067		map->st_ops = calloc(1, sizeof(*map->st_ops));
 1068		if (!map->st_ops)
 1069			return -ENOMEM;
 1070		st_ops = map->st_ops;
 1071		st_ops->data = malloc(type->size);
 1072		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
 1073		st_ops->kern_func_off = malloc(btf_vlen(type) *
 1074					       sizeof(*st_ops->kern_func_off));
 1075		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
 1076			return -ENOMEM;
 1077
 1078		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
 1079			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
 1080				var_name, STRUCT_OPS_SEC);
 1081			return -EINVAL;
 1082		}
 1083
 1084		memcpy(st_ops->data,
 1085		       obj->efile.st_ops_data->d_buf + vsi->offset,
 1086		       type->size);
 1087		st_ops->tname = tname;
 1088		st_ops->type = type;
 1089		st_ops->type_id = type_id;
 1090
 1091		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
 1092			 tname, type_id, var_name, vsi->offset);
 1093	}
 1094
 1095	return 0;
 1096}
 1097
 1098static struct bpf_object *bpf_object__new(const char *path,
 1099					  const void *obj_buf,
 1100					  size_t obj_buf_sz,
 1101					  const char *obj_name)
 1102{
 1103	struct bpf_object *obj;
 1104	char *end;
 1105
 1106	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
 1107	if (!obj) {
 1108		pr_warn("alloc memory failed for %s\n", path);
 1109		return ERR_PTR(-ENOMEM);
 1110	}
 1111
 1112	strcpy(obj->path, path);
 1113	if (obj_name) {
 1114		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
 1115		obj->name[sizeof(obj->name) - 1] = 0;
 1116	} else {
 1117		/* Using the GNU version of basename(), which doesn't modify its argument. */
 1118		strncpy(obj->name, basename((void *)path),
 1119			sizeof(obj->name) - 1);
 1120		end = strchr(obj->name, '.');
 1121		if (end)
 1122			*end = 0;
 1123	}
 1124
 1125	obj->efile.fd = -1;
 1126	/*
 1127	 * The caller of this function should also call
 1128	 * bpf_object__elf_finish() after data collection to return
 1129	 * obj_buf to the user. Otherwise, we would have to duplicate the
 1130	 * buffer to avoid the user freeing it before ELF processing finishes.
 1131	 */
 1132	obj->efile.obj_buf = obj_buf;
 1133	obj->efile.obj_buf_sz = obj_buf_sz;
 1134	obj->efile.maps_shndx = -1;
 1135	obj->efile.btf_maps_shndx = -1;
 1136	obj->efile.data_shndx = -1;
 1137	obj->efile.rodata_shndx = -1;
 1138	obj->efile.bss_shndx = -1;
 1139	obj->efile.st_ops_shndx = -1;
 1140	obj->kconfig_map_idx = -1;
 1141	obj->rodata_map_idx = -1;
 1142
 1143	obj->kern_version = get_kernel_version();
 1144	obj->loaded = false;
 1145
 1146	INIT_LIST_HEAD(&obj->list);
 1147	list_add(&obj->list, &bpf_objects_list);
 1148	return obj;
 1149}
 1150
 1151static void bpf_object__elf_finish(struct bpf_object *obj)
 1152{
 1153	if (!obj_elf_valid(obj))
 1154		return;
 1155
 1156	if (obj->efile.elf) {
 1157		elf_end(obj->efile.elf);
 1158		obj->efile.elf = NULL;
 1159	}
 1160	obj->efile.symbols = NULL;
 1161	obj->efile.data = NULL;
 1162	obj->efile.rodata = NULL;
 1163	obj->efile.bss = NULL;
 1164	obj->efile.st_ops_data = NULL;
 1165
 1166	zfree(&obj->efile.reloc_sects);
 1167	obj->efile.nr_reloc_sects = 0;
 1168	zclose(obj->efile.fd);
 1169	obj->efile.obj_buf = NULL;
 1170	obj->efile.obj_buf_sz = 0;
 1171}
 1172
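/* Create the libelf handle for 'obj', either from the caller-supplied
 * in-memory buffer or by opening obj->path, and sanity-check the ELF
 * header (ET_REL, EM_BPF).
 */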
 1173static int bpf_object__elf_init(struct bpf_object *obj)
 1174{
 1175	int err = 0;
 1176	GElf_Ehdr *ep;
 1177
 1178	if (obj_elf_valid(obj)) {
 1179		pr_warn("elf: init internal error\n");
 1180		return -LIBBPF_ERRNO__LIBELF;
 1181	}
 1182
 1183	if (obj->efile.obj_buf_sz > 0) {
 1184		/*
 1185		 * obj_buf should have been validated by
 1186		 * bpf_object__open_buffer().
 1187		 */
 1188		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
 1189					    obj->efile.obj_buf_sz);
 1190	} else {
 1191		obj->efile.fd = open(obj->path, O_RDONLY);
 1192		if (obj->efile.fd < 0) {
 1193			char errmsg[STRERR_BUFSIZE], *cp;
 1194
 1195			err = -errno;
 1196			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
 1197			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
 1198			return err;
 1199		}
 1200
 1201		obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
 1202	}
 1203
 1204	if (!obj->efile.elf) {
 1205		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
 1206		err = -LIBBPF_ERRNO__LIBELF;
 1207		goto errout;
 1208	}
 1209
 1210	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
 1211		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
 1212		err = -LIBBPF_ERRNO__FORMAT;
 1213		goto errout;
 1214	}
 1215	ep = &obj->efile.ehdr;
 1216
 1217	if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
 1218		pr_warn("elf: failed to get section names section index for %s: %s\n",
 1219			obj->path, elf_errmsg(-1));
 1220		err = -LIBBPF_ERRNO__FORMAT;
 1221		goto errout;
 1222	}
 1223
 1224	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
 1225	if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
 1226		pr_warn("elf: failed to get section names strings from %s: %s\n",
 1227			obj->path, elf_errmsg(-1));
 1228		err = -LIBBPF_ERRNO__FORMAT;
 1229		goto errout;
 1230	}
 1231
 1232	/* Old LLVM set e_machine to EM_NONE */
 1233	if (ep->e_type != ET_REL ||
 1234	    (ep->e_machine && ep->e_machine != EM_BPF)) {
 1235		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
 1236		err = -LIBBPF_ERRNO__FORMAT;
 1237		goto errout;
 1238	}
 1239
 1240	return 0;
 1241errout:
 1242	bpf_object__elf_finish(obj);
 1243	return err;
 1244}
 1245
 1246static int bpf_object__check_endianness(struct bpf_object *obj)
 1247{
 1248#if __BYTE_ORDER == __LITTLE_ENDIAN
 1249	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
 1250		return 0;
 1251#elif __BYTE_ORDER == __BIG_ENDIAN
 1252	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
 1253		return 0;
 1254#else
# error "Unrecognized __BYTE_ORDER"
 1256#endif
 1257	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
 1258	return -LIBBPF_ERRNO__ENDIAN;
 1259}
 1260
 1261static int
 1262bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
 1263{
 1264	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
 1265	pr_debug("license of %s is %s\n", obj->path, obj->license);
 1266	return 0;
 1267}
 1268
 1269static int
 1270bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
 1271{
 1272	__u32 kver;
 1273
 1274	if (size != sizeof(kver)) {
 1275		pr_warn("invalid kver section in %s\n", obj->path);
 1276		return -LIBBPF_ERRNO__FORMAT;
 1277	}
 1278	memcpy(&kver, data, sizeof(kver));
 1279	obj->kern_version = kver;
 1280	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
 1281	return 0;
 1282}
 1283
 1284static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
 1285{
 1286	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
 1287	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
 1288		return true;
 1289	return false;
 1290}
 1291
 1292int bpf_object__section_size(const struct bpf_object *obj, const char *name,
 1293			     __u32 *size)
 1294{
 1295	int ret = -ENOENT;
 1296
 1297	*size = 0;
 1298	if (!name) {
 1299		return -EINVAL;
 1300	} else if (!strcmp(name, DATA_SEC)) {
 1301		if (obj->efile.data)
 1302			*size = obj->efile.data->d_size;
 1303	} else if (!strcmp(name, BSS_SEC)) {
 1304		if (obj->efile.bss)
 1305			*size = obj->efile.bss->d_size;
 1306	} else if (!strcmp(name, RODATA_SEC)) {
 1307		if (obj->efile.rodata)
 1308			*size = obj->efile.rodata->d_size;
 1309	} else if (!strcmp(name, STRUCT_OPS_SEC)) {
 1310		if (obj->efile.st_ops_data)
 1311			*size = obj->efile.st_ops_data->d_size;
 1312	} else {
 1313		Elf_Scn *scn = elf_sec_by_name(obj, name);
 1314		Elf_Data *data = elf_sec_data(obj, scn);
 1315
 1316		if (data) {
 1317			ret = 0; /* found it */
 1318			*size = data->d_size;
 1319		}
 1320	}
 1321
 1322	return *size ? 0 : ret;
 1323}
 1324
 1325int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
 1326				__u32 *off)
 1327{
 1328	Elf_Data *symbols = obj->efile.symbols;
 1329	const char *sname;
 1330	size_t si;
 1331
 1332	if (!name || !off)
 1333		return -EINVAL;
 1334
 1335	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
 1336		GElf_Sym sym;
 1337
 1338		if (!gelf_getsym(symbols, si, &sym))
 1339			continue;
 1340		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
 1341		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
 1342			continue;
 1343
 1344		sname = elf_sym_str(obj, sym.st_name);
 1345		if (!sname) {
 1346			pr_warn("failed to get sym name string for var %s\n",
 1347				name);
 1348			return -EIO;
 1349		}
 1350		if (strcmp(name, sname) == 0) {
 1351			*off = sym.st_value;
 1352			return 0;
 1353		}
 1354	}
 1355
 1356	return -ENOENT;
 1357}
 1358
 1359static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
 1360{
 1361	struct bpf_map *new_maps;
 1362	size_t new_cap;
 1363	int i;
 1364
 1365	if (obj->nr_maps < obj->maps_cap)
 1366		return &obj->maps[obj->nr_maps++];
 1367
 1368	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
 1369	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
 1370	if (!new_maps) {
 1371		pr_warn("alloc maps for object failed\n");
 1372		return ERR_PTR(-ENOMEM);
 1373	}
 1374
 1375	obj->maps_cap = new_cap;
 1376	obj->maps = new_maps;
 1377
 1378	/* zero out new maps */
 1379	memset(obj->maps + obj->nr_maps, 0,
 1380	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
  1381	/*
  1382	 * Fill all fds with -1 so we won't close an incorrect fd (fd=0
  1383	 * is stdin) on failure (zclose won't close a negative fd).
  1384	 */
 1385	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
 1386		obj->maps[i].fd = -1;
 1387		obj->maps[i].inner_map_fd = -1;
 1388	}
 1389
 1390	return &obj->maps[obj->nr_maps++];
 1391}
 1392
 1393static size_t bpf_map_mmap_sz(const struct bpf_map *map)
 1394{
 1395	long page_sz = sysconf(_SC_PAGE_SIZE);
 1396	size_t map_sz;
 1397
 1398	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
 1399	map_sz = roundup(map_sz, page_sz);
 1400	return map_sz;
 1401}
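/*
 * Worked example (illustrative, not part of the source): with a 4096-byte
 * page, value_size = 7 and max_entries = 100 give
 * roundup(7, 8) * 100 = 800 bytes, which rounds up to a single 4096-byte
 * page for the mmap()-ed region.
 */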
 1402
 1403static char *internal_map_name(struct bpf_object *obj,
 1404			       enum libbpf_map_type type)
 1405{
 1406	char map_name[BPF_OBJ_NAME_LEN], *p;
 1407	const char *sfx = libbpf_type_to_btf_name[type];
 1408	int sfx_len = max((size_t)7, strlen(sfx));
 1409	int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
 1410			  strlen(obj->name));
 1411
 1412	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
 1413		 sfx_len, libbpf_type_to_btf_name[type]);
 1414
 1415	/* sanitise map name to characters allowed by kernel */
 1416	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
 1417		if (!isalnum(*p) && *p != '_' && *p != '.')
 1418			*p = '_';
 1419
 1420	return strdup(map_name);
 1421}
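/*
 * Example (illustrative, names are hypothetical): for an object named
 * "my_prog_object" and type LIBBPF_MAP_RODATA (BTF suffix ".rodata"),
 * pfx_len = min(16 - 7 - 1, 14) = 8, so the result is "my_prog_.rodata";
 * any character the kernel rejects is then replaced with '_'.
 */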
 1422
 1423static int
 1424bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
 1425			      int sec_idx, void *data, size_t data_sz)
 1426{
 1427	struct bpf_map_def *def;
 1428	struct bpf_map *map;
 1429	int err;
 1430
 1431	map = bpf_object__add_map(obj);
 1432	if (IS_ERR(map))
 1433		return PTR_ERR(map);
 1434
 1435	map->libbpf_type = type;
 1436	map->sec_idx = sec_idx;
 1437	map->sec_offset = 0;
 1438	map->name = internal_map_name(obj, type);
 1439	if (!map->name) {
 1440		pr_warn("failed to alloc map name\n");
 1441		return -ENOMEM;
 1442	}
 1443
 1444	def = &map->def;
 1445	def->type = BPF_MAP_TYPE_ARRAY;
 1446	def->key_size = sizeof(int);
 1447	def->value_size = data_sz;
 1448	def->max_entries = 1;
 1449	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
 1450			 ? BPF_F_RDONLY_PROG : 0;
 1451	def->map_flags |= BPF_F_MMAPABLE;
 1452
 1453	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
 1454		 map->name, map->sec_idx, map->sec_offset, def->map_flags);
 1455
 1456	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
 1457			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 1458	if (map->mmaped == MAP_FAILED) {
 1459		err = -errno;
 1460		map->mmaped = NULL;
 1461		pr_warn("failed to alloc map '%s' content buffer: %d\n",
 1462			map->name, err);
 1463		zfree(&map->name);
 1464		return err;
 1465	}
 1466
 1467	if (data)
 1468		memcpy(map->mmaped, data, data_sz);
 1469
 1470	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
 1471	return 0;
 1472}
 1473
 1474static int bpf_object__init_global_data_maps(struct bpf_object *obj)
 1475{
 1476	int err;
 1477
 1478	/*
 1479	 * Populate obj->maps with libbpf internal maps.
 1480	 */
 1481	if (obj->efile.data_shndx >= 0) {
 1482		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
 1483						    obj->efile.data_shndx,
 1484						    obj->efile.data->d_buf,
 1485						    obj->efile.data->d_size);
 1486		if (err)
 1487			return err;
 1488	}
 1489	if (obj->efile.rodata_shndx >= 0) {
 1490		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
 1491						    obj->efile.rodata_shndx,
 1492						    obj->efile.rodata->d_buf,
 1493						    obj->efile.rodata->d_size);
 1494		if (err)
 1495			return err;
 1496
 1497		obj->rodata_map_idx = obj->nr_maps - 1;
 1498	}
 1499	if (obj->efile.bss_shndx >= 0) {
 1500		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
 1501						    obj->efile.bss_shndx,
 1502						    NULL,
 1503						    obj->efile.bss->d_size);
 1504		if (err)
 1505			return err;
 1506	}
 1507	return 0;
 1508}
 1509
 1510
 1511static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
 1512					       const void *name)
 1513{
 1514	int i;
 1515
 1516	for (i = 0; i < obj->nr_extern; i++) {
 1517		if (strcmp(obj->externs[i].name, name) == 0)
 1518			return &obj->externs[i];
 1519	}
 1520	return NULL;
 1521}
 1522
 1523static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
 1524			      char value)
 1525{
 1526	switch (ext->kcfg.type) {
 1527	case KCFG_BOOL:
 1528		if (value == 'm') {
 1529			pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
 1530				ext->name, value);
 1531			return -EINVAL;
 1532		}
 1533		*(bool *)ext_val = value == 'y' ? true : false;
 1534		break;
 1535	case KCFG_TRISTATE:
 1536		if (value == 'y')
 1537			*(enum libbpf_tristate *)ext_val = TRI_YES;
 1538		else if (value == 'm')
 1539			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
 1540		else /* value == 'n' */
 1541			*(enum libbpf_tristate *)ext_val = TRI_NO;
 1542		break;
 1543	case KCFG_CHAR:
 1544		*(char *)ext_val = value;
 1545		break;
 1546	case KCFG_UNKNOWN:
 1547	case KCFG_INT:
 1548	case KCFG_CHAR_ARR:
 1549	default:
 1550		pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
 1551			ext->name, value);
 1552		return -EINVAL;
 1553	}
 1554	ext->is_set = true;
 1555	return 0;
 1556}
 1557
 1558static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
 1559			      const char *value)
 1560{
 1561	size_t len;
 1562
 1563	if (ext->kcfg.type != KCFG_CHAR_ARR) {
 1564		pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
 1565		return -EINVAL;
 1566	}
 1567
 1568	len = strlen(value);
 1569	if (value[len - 1] != '"') {
 1570		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
 1571			ext->name, value);
 1572		return -EINVAL;
 1573	}
 1574
 1575	/* strip quotes */
 1576	len -= 2;
 1577	if (len >= ext->kcfg.sz) {
  1578		pr_warn("extern (kcfg) '%s': long string config %s (%zu bytes) truncated to %d bytes\n",
 1579			ext->name, value, len, ext->kcfg.sz - 1);
 1580		len = ext->kcfg.sz - 1;
 1581	}
 1582	memcpy(ext_val, value + 1, len);
 1583	ext_val[len] = '\0';
 1584	ext->is_set = true;
 1585	return 0;
 1586}
 1587
 1588static int parse_u64(const char *value, __u64 *res)
 1589{
 1590	char *value_end;
 1591	int err;
 1592
 1593	errno = 0;
 1594	*res = strtoull(value, &value_end, 0);
 1595	if (errno) {
 1596		err = -errno;
 1597		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
 1598		return err;
 1599	}
 1600	if (*value_end) {
 1601		pr_warn("failed to parse '%s' as integer completely\n", value);
 1602		return -EINVAL;
 1603	}
 1604	return 0;
 1605}
 1606
 1607static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
 1608{
 1609	int bit_sz = ext->kcfg.sz * 8;
 1610
 1611	if (ext->kcfg.sz == 8)
 1612		return true;
 1613
  1614	/* Validate that a value stored in u64 fits in an integer of
  1615	 * `ext->kcfg.sz` bytes without any loss of information. If the
  1616	 * target integer is signed, we rely on the following limits of a
  1617	 * signed integer type of Y bits and the subsequent transformation:
  1618	 *
  1619	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
  1620	 *            0 <= X + 2^(Y-1) <= 2^Y - 1
  1621	 *            0 <= X + 2^(Y-1) <  2^Y
  1622	 *
  1623	 * For an unsigned target integer, check that the upper (64 - Y)
  1624	 * bits are all zero.
  1625	 */
 1626	if (ext->kcfg.is_signed)
 1627		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
 1628	else
 1629		return (v >> bit_sz) == 0;
 1630}
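/*
 * Worked example (illustrative): for a signed 1-byte target (bit_sz = 8),
 * v = 0xffffffffffffff80 (-128) passes, since v + 2^7 wraps to 0, which is
 * < 2^8; v = 0xffffffffffffff7f (-129) fails, since v + 2^7 =
 * 0xffffffffffffffff.
 */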
 1631
 1632static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
 1633			      __u64 value)
 1634{
 1635	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
 1636		pr_warn("extern (kcfg) %s=%llu should be integer\n",
 1637			ext->name, (unsigned long long)value);
 1638		return -EINVAL;
 1639	}
 1640	if (!is_kcfg_value_in_range(ext, value)) {
 1641		pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
 1642			ext->name, (unsigned long long)value, ext->kcfg.sz);
 1643		return -ERANGE;
 1644	}
 1645	switch (ext->kcfg.sz) {
 1646		case 1: *(__u8 *)ext_val = value; break;
 1647		case 2: *(__u16 *)ext_val = value; break;
 1648		case 4: *(__u32 *)ext_val = value; break;
 1649		case 8: *(__u64 *)ext_val = value; break;
 1650		default:
 1651			return -EINVAL;
 1652	}
 1653	ext->is_set = true;
 1654	return 0;
 1655}
 1656
 1657static int bpf_object__process_kconfig_line(struct bpf_object *obj,
 1658					    char *buf, void *data)
 1659{
 1660	struct extern_desc *ext;
 1661	char *sep, *value;
 1662	int len, err = 0;
 1663	void *ext_val;
 1664	__u64 num;
 1665
 1666	if (strncmp(buf, "CONFIG_", 7))
 1667		return 0;
 1668
 1669	sep = strchr(buf, '=');
 1670	if (!sep) {
 1671		pr_warn("failed to parse '%s': no separator\n", buf);
 1672		return -EINVAL;
 1673	}
 1674
 1675	/* Trim ending '\n' */
 1676	len = strlen(buf);
 1677	if (buf[len - 1] == '\n')
 1678		buf[len - 1] = '\0';
 1679	/* Split on '=' and ensure that a value is present. */
 1680	*sep = '\0';
 1681	if (!sep[1]) {
 1682		*sep = '=';
 1683		pr_warn("failed to parse '%s': no value\n", buf);
 1684		return -EINVAL;
 1685	}
 1686
 1687	ext = find_extern_by_name(obj, buf);
 1688	if (!ext || ext->is_set)
 1689		return 0;
 1690
 1691	ext_val = data + ext->kcfg.data_off;
 1692	value = sep + 1;
 1693
 1694	switch (*value) {
 1695	case 'y': case 'n': case 'm':
 1696		err = set_kcfg_value_tri(ext, ext_val, *value);
 1697		break;
 1698	case '"':
 1699		err = set_kcfg_value_str(ext, ext_val, value);
 1700		break;
 1701	default:
 1702		/* assume integer */
 1703		err = parse_u64(value, &num);
 1704		if (err) {
 1705			pr_warn("extern (kcfg) %s=%s should be integer\n",
 1706				ext->name, value);
 1707			return err;
 1708		}
 1709		err = set_kcfg_value_num(ext, ext_val, num);
 1710		break;
 1711	}
 1712	if (err)
 1713		return err;
 1714	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
 1715	return 0;
 1716}
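/*
 * Example input lines (illustrative), in the format produced by
 * /proc/config.gz or /boot/config-$(uname -r):
 *
 *     CONFIG_BPF=y            -> bool/tristate extern set to true/TRI_YES
 *     CONFIG_HZ=250           -> integer extern set to 250
 *     CONFIG_LOCALVERSION=""  -> char array extern set to ""
 *
 * Lines not starting with "CONFIG_", and configs with no matching extern,
 * are skipped.
 */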
 1717
 1718static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
 1719{
 1720	char buf[PATH_MAX];
 1721	struct utsname uts;
 1722	int len, err = 0;
 1723	gzFile file;
 1724
 1725	uname(&uts);
 1726	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
 1727	if (len < 0)
 1728		return -EINVAL;
 1729	else if (len >= PATH_MAX)
 1730		return -ENAMETOOLONG;
 1731
 1732	/* gzopen also accepts uncompressed files. */
 1733	file = gzopen(buf, "r");
 1734	if (!file)
 1735		file = gzopen("/proc/config.gz", "r");
 1736
 1737	if (!file) {
 1738		pr_warn("failed to open system Kconfig\n");
 1739		return -ENOENT;
 1740	}
 1741
 1742	while (gzgets(file, buf, sizeof(buf))) {
 1743		err = bpf_object__process_kconfig_line(obj, buf, data);
 1744		if (err) {
 1745			pr_warn("error parsing system Kconfig line '%s': %d\n",
 1746				buf, err);
 1747			goto out;
 1748		}
 1749	}
 1750
 1751out:
 1752	gzclose(file);
 1753	return err;
 1754}
 1755
 1756static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
 1757					const char *config, void *data)
 1758{
 1759	char buf[PATH_MAX];
 1760	int err = 0;
 1761	FILE *file;
 1762
 1763	file = fmemopen((void *)config, strlen(config), "r");
 1764	if (!file) {
 1765		err = -errno;
 1766		pr_warn("failed to open in-memory Kconfig: %d\n", err);
 1767		return err;
 1768	}
 1769
 1770	while (fgets(buf, sizeof(buf), file)) {
 1771		err = bpf_object__process_kconfig_line(obj, buf, data);
 1772		if (err) {
 1773			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
 1774				buf, err);
 1775			break;
 1776		}
 1777	}
 1778
 1779	fclose(file);
 1780	return err;
 1781}
 1782
 1783static int bpf_object__init_kconfig_map(struct bpf_object *obj)
 1784{
 1785	struct extern_desc *last_ext = NULL, *ext;
 1786	size_t map_sz;
 1787	int i, err;
 1788
 1789	for (i = 0; i < obj->nr_extern; i++) {
 1790		ext = &obj->externs[i];
 1791		if (ext->type == EXT_KCFG)
 1792			last_ext = ext;
 1793	}
 1794
 1795	if (!last_ext)
 1796		return 0;
 1797
 1798	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
 1799	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
 1800					    obj->efile.symbols_shndx,
 1801					    NULL, map_sz);
 1802	if (err)
 1803		return err;
 1804
 1805	obj->kconfig_map_idx = obj->nr_maps - 1;
 1806
 1807	return 0;
 1808}
 1809
 1810static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
 1811{
 1812	Elf_Data *symbols = obj->efile.symbols;
 1813	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
 1814	Elf_Data *data = NULL;
 1815	Elf_Scn *scn;
 1816
 1817	if (obj->efile.maps_shndx < 0)
 1818		return 0;
 1819
 1820	if (!symbols)
 1821		return -EINVAL;
 1822
 1823	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
 1824	data = elf_sec_data(obj, scn);
 1825	if (!scn || !data) {
 1826		pr_warn("elf: failed to get legacy map definitions for %s\n",
 1827			obj->path);
 1828		return -EINVAL;
 1829	}
 1830
  1831	/*
  1832	 * Count the number of maps. Each map has a name.
  1833	 * Arrays of maps are not supported: only the first element is
  1834	 * considered.
  1835	 *
  1836	 * TODO: Detect arrays of maps and report an error.
  1837	 */
 1838	nr_syms = symbols->d_size / sizeof(GElf_Sym);
 1839	for (i = 0; i < nr_syms; i++) {
 1840		GElf_Sym sym;
 1841
 1842		if (!gelf_getsym(symbols, i, &sym))
 1843			continue;
 1844		if (sym.st_shndx != obj->efile.maps_shndx)
 1845			continue;
 1846		nr_maps++;
 1847	}
 1848	/* Assume equally sized map definitions */
 1849	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
 1850		 nr_maps, data->d_size, obj->path);
 1851
 1852	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
 1853		pr_warn("elf: unable to determine legacy map definition size in %s\n",
 1854			obj->path);
 1855		return -EINVAL;
 1856	}
 1857	map_def_sz = data->d_size / nr_maps;
 1858
 1859	/* Fill obj->maps using data in "maps" section.  */
 1860	for (i = 0; i < nr_syms; i++) {
 1861		GElf_Sym sym;
 1862		const char *map_name;
 1863		struct bpf_map_def *def;
 1864		struct bpf_map *map;
 1865
 1866		if (!gelf_getsym(symbols, i, &sym))
 1867			continue;
 1868		if (sym.st_shndx != obj->efile.maps_shndx)
 1869			continue;
 1870
 1871		map = bpf_object__add_map(obj);
 1872		if (IS_ERR(map))
 1873			return PTR_ERR(map);
 1874
 1875		map_name = elf_sym_str(obj, sym.st_name);
 1876		if (!map_name) {
 1877			pr_warn("failed to get map #%d name sym string for obj %s\n",
 1878				i, obj->path);
 1879			return -LIBBPF_ERRNO__FORMAT;
 1880		}
 1881
 1882		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION
 1883		    || GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
 1884			pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
 1885			return -ENOTSUP;
 1886		}
 1887
 1888		map->libbpf_type = LIBBPF_MAP_UNSPEC;
 1889		map->sec_idx = sym.st_shndx;
 1890		map->sec_offset = sym.st_value;
 1891		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
 1892			 map_name, map->sec_idx, map->sec_offset);
 1893		if (sym.st_value + map_def_sz > data->d_size) {
 1894			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
 1895				obj->path, map_name);
 1896			return -EINVAL;
 1897		}
 1898
 1899		map->name = strdup(map_name);
 1900		if (!map->name) {
 1901			pr_warn("failed to alloc map name\n");
 1902			return -ENOMEM;
 1903		}
 1904		pr_debug("map %d is \"%s\"\n", i, map->name);
 1905		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
  1906		/*
  1907		 * If the definition of the map in the object file fits in
  1908		 * bpf_map_def, copy it.  Any extra fields in our version
  1909		 * of bpf_map_def will default to zero because new map slots
  1910		 * are zero-initialized in bpf_object__add_map().
  1911		 */
 1912		if (map_def_sz <= sizeof(struct bpf_map_def)) {
 1913			memcpy(&map->def, def, map_def_sz);
 1914		} else {
 1915			/*
 1916			 * Here the map structure being read is bigger than what
 1917			 * we expect, truncate if the excess bits are all zero.
 1918			 * If they are not zero, reject this map as
 1919			 * incompatible.
 1920			 */
 1921			char *b;
 1922
 1923			for (b = ((char *)def) + sizeof(struct bpf_map_def);
 1924			     b < ((char *)def) + map_def_sz; b++) {
 1925				if (*b != 0) {
 1926					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
 1927						obj->path, map_name);
 1928					if (strict)
 1929						return -EINVAL;
 1930				}
 1931			}
 1932			memcpy(&map->def, def, sizeof(struct bpf_map_def));
 1933		}
 1934	}
 1935	return 0;
 1936}
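/*
 * A legacy map definition (illustrative, "my_map" is hypothetical) of the
 * kind this function parses out of the "maps" ELF section:
 *
 *     struct bpf_map_def SEC("maps") my_map = {
 *             .type        = BPF_MAP_TYPE_HASH,
 *             .key_size    = sizeof(__u32),
 *             .value_size  = sizeof(__u64),
 *             .max_entries = 1024,
 *     };
 */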
 1937
 1938const struct btf_type *
 1939skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
 1940{
 1941	const struct btf_type *t = btf__type_by_id(btf, id);
 1942
 1943	if (res_id)
 1944		*res_id = id;
 1945
 1946	while (btf_is_mod(t) || btf_is_typedef(t)) {
 1947		if (res_id)
 1948			*res_id = t->type;
 1949		t = btf__type_by_id(btf, t->type);
 1950	}
 1951
 1952	return t;
 1953}
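/*
 * Example (illustrative): for
 *
 *     typedef const volatile int cvi_t;
 *
 * skip_mods_and_typedefs() called on cvi_t's BTF id walks through the
 * TYPEDEF, CONST, and VOLATILE entries and returns the underlying INT
 * type, with *res_id set to the INT type's id.
 */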
 1954
 1955static const struct btf_type *
 1956resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
 1957{
 1958	const struct btf_type *t;
 1959
 1960	t = skip_mods_and_typedefs(btf, id, NULL);
 1961	if (!btf_is_ptr(t))
 1962		return NULL;
 1963
 1964	t = skip_mods_and_typedefs(btf, t->type, res_id);
 1965
 1966	return btf_is_func_proto(t) ? t : NULL;
 1967}
 1968
 1969static const char *__btf_kind_str(__u16 kind)
 1970{
 1971	switch (kind) {
 1972	case BTF_KIND_UNKN: return "void";
 1973	case BTF_KIND_INT: return "int";
 1974	case BTF_KIND_PTR: return "ptr";
 1975	case BTF_KIND_ARRAY: return "array";
 1976	case BTF_KIND_STRUCT: return "struct";
 1977	case BTF_KIND_UNION: return "union";
 1978	case BTF_KIND_ENUM: return "enum";
 1979	case BTF_KIND_FWD: return "fwd";
 1980	case BTF_KIND_TYPEDEF: return "typedef";
 1981	case BTF_KIND_VOLATILE: return "volatile";
 1982	case BTF_KIND_CONST: return "const";
 1983	case BTF_KIND_RESTRICT: return "restrict";
 1984	case BTF_KIND_FUNC: return "func";
 1985	case BTF_KIND_FUNC_PROTO: return "func_proto";
 1986	case BTF_KIND_VAR: return "var";
 1987	case BTF_KIND_DATASEC: return "datasec";
 1988	case BTF_KIND_FLOAT: return "float";
 1989	default: return "unknown";
 1990	}
 1991}
 1992
 1993const char *btf_kind_str(const struct btf_type *t)
 1994{
 1995	return __btf_kind_str(btf_kind(t));
 1996}
 1997
 1998/*
 1999 * Fetch integer attribute of BTF map definition. Such attributes are
 2000 * represented using a pointer to an array, in which dimensionality of array
 2001 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 2002 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
 2003 * type definition, while using only sizeof(void *) space in ELF data section.
 2004 */
 2005static bool get_map_field_int(const char *map_name, const struct btf *btf,
 2006			      const struct btf_member *m, __u32 *res)
 2007{
 2008	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
 2009	const char *name = btf__name_by_offset(btf, m->name_off);
 2010	const struct btf_array *arr_info;
 2011	const struct btf_type *arr_t;
 2012
 2013	if (!btf_is_ptr(t)) {
 2014		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
 2015			map_name, name, btf_kind_str(t));
 2016		return false;
 2017	}
 2018
 2019	arr_t = btf__type_by_id(btf, t->type);
 2020	if (!arr_t) {
 2021		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
 2022			map_name, name, t->type);
 2023		return false;
 2024	}
 2025	if (!btf_is_array(arr_t)) {
 2026		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
 2027			map_name, name, btf_kind_str(arr_t));
 2028		return false;
 2029	}
 2030	arr_info = btf_array(arr_t);
 2031	*res = arr_info->nelems;
 2032	return true;
 2033}
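/*
 * Example of this encoding (illustrative): bpf_helpers.h defines
 *
 *     #define __uint(name, val) int (*name)[val]
 *
 * so a member declared as __uint(max_entries, 1024) is a pointer to
 * int[1024], and get_map_field_int() recovers 1024 from the array's
 * nelems.
 */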
 2034
 2035static int build_map_pin_path(struct bpf_map *map, const char *path)
 2036{
 2037	char buf[PATH_MAX];
 2038	int len;
 2039
 2040	if (!path)
 2041		path = "/sys/fs/bpf";
 2042
 2043	len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
 2044	if (len < 0)
 2045		return -EINVAL;
 2046	else if (len >= PATH_MAX)
 2047		return -ENAMETOOLONG;
 2048
 2049	return bpf_map__set_pin_path(map, buf);
 2050}
 2051
 2052int parse_btf_map_def(const char *map_name, struct btf *btf,
 2053		      const struct btf_type *def_t, bool strict,
 2054		      struct btf_map_def *map_def, struct btf_map_def *inner_def)
 2055{
 2056	const struct btf_type *t;
 2057	const struct btf_member *m;
 2058	bool is_inner = inner_def == NULL;
 2059	int vlen, i;
 2060
 2061	vlen = btf_vlen(def_t);
 2062	m = btf_members(def_t);
 2063	for (i = 0; i < vlen; i++, m++) {
 2064		const char *name = btf__name_by_offset(btf, m->name_off);
 2065
 2066		if (!name) {
 2067			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
 2068			return -EINVAL;
 2069		}
 2070		if (strcmp(name, "type") == 0) {
 2071			if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
 2072				return -EINVAL;
 2073			map_def->parts |= MAP_DEF_MAP_TYPE;
 2074		} else if (strcmp(name, "max_entries") == 0) {
 2075			if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
 2076				return -EINVAL;
 2077			map_def->parts |= MAP_DEF_MAX_ENTRIES;
 2078		} else if (strcmp(name, "map_flags") == 0) {
 2079			if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
 2080				return -EINVAL;
 2081			map_def->parts |= MAP_DEF_MAP_FLAGS;
 2082		} else if (strcmp(name, "numa_node") == 0) {
 2083			if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
 2084				return -EINVAL;
 2085			map_def->parts |= MAP_DEF_NUMA_NODE;
 2086		} else if (strcmp(name, "key_size") == 0) {
 2087			__u32 sz;
 2088
 2089			if (!get_map_field_int(map_name, btf, m, &sz))
 2090				return -EINVAL;
 2091			if (map_def->key_size && map_def->key_size != sz) {
 2092				pr_warn("map '%s': conflicting key size %u != %u.\n",
 2093					map_name, map_def->key_size, sz);
 2094				return -EINVAL;
 2095			}
 2096			map_def->key_size = sz;
 2097			map_def->parts |= MAP_DEF_KEY_SIZE;
 2098		} else if (strcmp(name, "key") == 0) {
 2099			__s64 sz;
 2100
 2101			t = btf__type_by_id(btf, m->type);
 2102			if (!t) {
 2103				pr_warn("map '%s': key type [%d] not found.\n",
 2104					map_name, m->type);
 2105				return -EINVAL;
 2106			}
 2107			if (!btf_is_ptr(t)) {
 2108				pr_warn("map '%s': key spec is not PTR: %s.\n",
 2109					map_name, btf_kind_str(t));
 2110				return -EINVAL;
 2111			}
 2112			sz = btf__resolve_size(btf, t->type);
 2113			if (sz < 0) {
 2114				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
 2115					map_name, t->type, (ssize_t)sz);
 2116				return sz;
 2117			}
 2118			if (map_def->key_size && map_def->key_size != sz) {
 2119				pr_warn("map '%s': conflicting key size %u != %zd.\n",
 2120					map_name, map_def->key_size, (ssize_t)sz);
 2121				return -EINVAL;
 2122			}
 2123			map_def->key_size = sz;
 2124			map_def->key_type_id = t->type;
 2125			map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
 2126		} else if (strcmp(name, "value_size") == 0) {
 2127			__u32 sz;
 2128
 2129			if (!get_map_field_int(map_name, btf, m, &sz))
 2130				return -EINVAL;
 2131			if (map_def->value_size && map_def->value_size != sz) {
 2132				pr_warn("map '%s': conflicting value size %u != %u.\n",
 2133					map_name, map_def->value_size, sz);
 2134				return -EINVAL;
 2135			}
 2136			map_def->value_size = sz;
 2137			map_def->parts |= MAP_DEF_VALUE_SIZE;
 2138		} else if (strcmp(name, "value") == 0) {
 2139			__s64 sz;
 2140
 2141			t = btf__type_by_id(btf, m->type);
 2142			if (!t) {
 2143				pr_warn("map '%s': value type [%d] not found.\n",
 2144					map_name, m->type);
 2145				return -EINVAL;
 2146			}
 2147			if (!btf_is_ptr(t)) {
 2148				pr_warn("map '%s': value spec is not PTR: %s.\n",
 2149					map_name, btf_kind_str(t));
 2150				return -EINVAL;
 2151			}
 2152			sz = btf__resolve_size(btf, t->type);
 2153			if (sz < 0) {
 2154				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
 2155					map_name, t->type, (ssize_t)sz);
 2156				return sz;
 2157			}
 2158			if (map_def->value_size && map_def->value_size != sz) {
 2159				pr_warn("map '%s': conflicting value size %u != %zd.\n",
 2160					map_name, map_def->value_size, (ssize_t)sz);
 2161				return -EINVAL;
 2162			}
 2163			map_def->value_size = sz;
 2164			map_def->value_type_id = t->type;
 2165			map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
  2166		} else if (strcmp(name, "values") == 0) {
 2168			char inner_map_name[128];
 2169			int err;
 2170
 2171			if (is_inner) {
 2172				pr_warn("map '%s': multi-level inner maps not supported.\n",
 2173					map_name);
 2174				return -ENOTSUP;
 2175			}
 2176			if (i != vlen - 1) {
 2177				pr_warn("map '%s': '%s' member should be last.\n",
 2178					map_name, name);
 2179				return -EINVAL;
 2180			}
 2181			if (!bpf_map_type__is_map_in_map(map_def->map_type)) {
 2182				pr_warn("map '%s': should be map-in-map.\n",
 2183					map_name);
 2184				return -ENOTSUP;
 2185			}
 2186			if (map_def->value_size && map_def->value_size != 4) {
 2187				pr_warn("map '%s': conflicting value size %u != 4.\n",
 2188					map_name, map_def->value_size);
 2189				return -EINVAL;
 2190			}
 2191			map_def->value_size = 4;
 2192			t = btf__type_by_id(btf, m->type);
 2193			if (!t) {
 2194				pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
 2195					map_name, m->type);
 2196				return -EINVAL;
 2197			}
 2198			if (!btf_is_array(t) || btf_array(t)->nelems) {
 2199				pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
 2200					map_name);
 2201				return -EINVAL;
 2202			}
 2203			t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
 2204			if (!btf_is_ptr(t)) {
 2205				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
 2206					map_name, btf_kind_str(t));
 2207				return -EINVAL;
 2208			}
 2209			t = skip_mods_and_typedefs(btf, t->type, NULL);
 2210			if (!btf_is_struct(t)) {
 2211				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
 2212					map_name, btf_kind_str(t));
 2213				return -EINVAL;
 2214			}
 2215
 2216			snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
 2217			err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
 2218			if (err)
 2219				return err;
 2220
 2221			map_def->parts |= MAP_DEF_INNER_MAP;
 2222		} else if (strcmp(name, "pinning") == 0) {
 2223			__u32 val;
 2224
 2225			if (is_inner) {
 2226				pr_warn("map '%s': inner def can't be pinned.\n", map_name);
 2227				return -EINVAL;
 2228			}
 2229			if (!get_map_field_int(map_name, btf, m, &val))
 2230				return -EINVAL;
 2231			if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
 2232				pr_warn("map '%s': invalid pinning value %u.\n",
 2233					map_name, val);
 2234				return -EINVAL;
 2235			}
 2236			map_def->pinning = val;
 2237			map_def->parts |= MAP_DEF_PINNING;
 2238		} else {
 2239			if (strict) {
 2240				pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
 2241				return -ENOTSUP;
 2242			}
 2243			pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
 2244		}
 2245	}
 2246
 2247	if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
 2248		pr_warn("map '%s': map type isn't specified.\n", map_name);
 2249		return -EINVAL;
 2250	}
 2251
 2252	return 0;
 2253}
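/*
 * A BTF-defined map declaration (illustrative; "my_map" and struct
 * my_value are hypothetical) of the form this function parses, using the
 * __uint/__type macros from bpf_helpers.h:
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_HASH);
 *             __uint(max_entries, 1024);
 *             __type(key, __u32);
 *             __type(value, struct my_value);
 *     } my_map SEC(".maps");
 *
 * "key"/"value" members contribute both sizes and BTF type ids, while
 * "key_size"/"value_size" contribute sizes only.
 */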
 2254
 2255static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
 2256{
 2257	map->def.type = def->map_type;
 2258	map->def.key_size = def->key_size;
 2259	map->def.value_size = def->value_size;
 2260	map->def.max_entries = def->max_entries;
 2261	map->def.map_flags = def->map_flags;
 2262
 2263	map->numa_node = def->numa_node;
 2264	map->btf_key_type_id = def->key_type_id;
 2265	map->btf_value_type_id = def->value_type_id;
 2266
 2267	if (def->parts & MAP_DEF_MAP_TYPE)
 2268		pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
 2269
 2270	if (def->parts & MAP_DEF_KEY_TYPE)
 2271		pr_debug("map '%s': found key [%u], sz = %u.\n",
 2272			 map->name, def->key_type_id, def->key_size);
 2273	else if (def->parts & MAP_DEF_KEY_SIZE)
 2274		pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
 2275
 2276	if (def->parts & MAP_DEF_VALUE_TYPE)
 2277		pr_debug("map '%s': found value [%u], sz = %u.\n",
 2278			 map->name, def->value_type_id, def->value_size);
 2279	else if (def->parts & MAP_DEF_VALUE_SIZE)
 2280		pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
 2281
 2282	if (def->parts & MAP_DEF_MAX_ENTRIES)
 2283		pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
 2284	if (def->parts & MAP_DEF_MAP_FLAGS)
 2285		pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
 2286	if (def->parts & MAP_DEF_PINNING)
 2287		pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
 2288	if (def->parts & MAP_DEF_NUMA_NODE)
 2289		pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
 2290
 2291	if (def->parts & MAP_DEF_INNER_MAP)
 2292		pr_debug("map '%s': found inner map definition.\n", map->name);
 2293}
 2294
 2295static const char *btf_var_linkage_str(__u32 linkage)
 2296{
 2297	switch (linkage) {
 2298	case BTF_VAR_STATIC: return "static";
 2299	case BTF_VAR_GLOBAL_ALLOCATED: return "global";
 2300	case BTF_VAR_GLOBAL_EXTERN: return "extern";
 2301	default: return "unknown";
 2302	}
 2303}
 2304
 2305static int bpf_object__init_user_btf_map(struct bpf_object *obj,
 2306					 const struct btf_type *sec,
 2307					 int var_idx, int sec_idx,
 2308					 const Elf_Data *data, bool strict,
 2309					 const char *pin_root_path)
 2310{
 2311	struct btf_map_def map_def = {}, inner_def = {};
 2312	const struct btf_type *var, *def;
 2313	const struct btf_var_secinfo *vi;
 2314	const struct btf_var *var_extra;
 2315	const char *map_name;
 2316	struct bpf_map *map;
 2317	int err;
 2318
 2319	vi = btf_var_secinfos(sec) + var_idx;
 2320	var = btf__type_by_id(obj->btf, vi->type);
 2321	var_extra = btf_var(var);
 2322	map_name = btf__name_by_offset(obj->btf, var->name_off);
 2323
 2324	if (map_name == NULL || map_name[0] == '\0') {
 2325		pr_warn("map #%d: empty name.\n", var_idx);
 2326		return -EINVAL;
 2327	}
 2328	if ((__u64)vi->offset + vi->size > data->d_size) {
 2329		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
 2330		return -EINVAL;
 2331	}
 2332	if (!btf_is_var(var)) {
 2333		pr_warn("map '%s': unexpected var kind %s.\n",
 2334			map_name, btf_kind_str(var));
 2335		return -EINVAL;
 2336	}
 2337	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
 2338		pr_warn("map '%s': unsupported map linkage %s.\n",
 2339			map_name, btf_var_linkage_str(var_extra->linkage));
 2340		return -EOPNOTSUPP;
 2341	}
 2342
 2343	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
 2344	if (!btf_is_struct(def)) {
 2345		pr_warn("map '%s': unexpected def kind %s.\n",
  2346			map_name, btf_kind_str(def));
 2347		return -EINVAL;
 2348	}
 2349	if (def->size > vi->size) {
 2350		pr_warn("map '%s': invalid def size.\n", map_name);
 2351		return -EINVAL;
 2352	}
 2353
 2354	map = bpf_object__add_map(obj);
 2355	if (IS_ERR(map))
 2356		return PTR_ERR(map);
 2357	map->name = strdup(map_name);
 2358	if (!map->name) {
 2359		pr_warn("map '%s': failed to alloc map name.\n", map_name);
 2360		return -ENOMEM;
 2361	}
 2362	map->libbpf_type = LIBBPF_MAP_UNSPEC;
 2363	map->def.type = BPF_MAP_TYPE_UNSPEC;
 2364	map->sec_idx = sec_idx;
 2365	map->sec_offset = vi->offset;
 2366	map->btf_var_idx = var_idx;
 2367	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
 2368		 map_name, map->sec_idx, map->sec_offset);
 2369
 2370	err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
 2371	if (err)
 2372		return err;
 2373
 2374	fill_map_from_def(map, &map_def);
 2375
 2376	if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
 2377		err = build_map_pin_path(map, pin_root_path);
 2378		if (err) {
 2379			pr_warn("map '%s': couldn't build pin path.\n", map->name);
 2380			return err;
 2381		}
 2382	}
 2383
 2384	if (map_def.parts & MAP_DEF_INNER_MAP) {
 2385		map->inner_map = calloc(1, sizeof(*map->inner_map));
 2386		if (!map->inner_map)
 2387			return -ENOMEM;
 2388		map->inner_map->fd = -1;
 2389		map->inner_map->sec_idx = sec_idx;
 2390		map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
 2391		if (!map->inner_map->name)
 2392			return -ENOMEM;
 2393		sprintf(map->inner_map->name, "%s.inner", map_name);
 2394
 2395		fill_map_from_def(map->inner_map, &inner_def);
 2396	}
 2397
 2398	return 0;
 2399}
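/*
 * A map-in-map declaration (illustrative, "outer_map" is hypothetical)
 * handled by the "values" branch of parse_btf_map_def() and by the
 * MAP_DEF_INNER_MAP case above:
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *             __uint(max_entries, 8);
 *             __type(key, __u32);
 *             __array(values, struct {
 *                     __uint(type, BPF_MAP_TYPE_ARRAY);
 *                     __uint(max_entries, 1);
 *                     __type(key, __u32);
 *                     __type(value, __u64);
 *             });
 *     } outer_map SEC(".maps");
 */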
 2400
 2401static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 2402					  const char *pin_root_path)
 2403{
 2404	const struct btf_type *sec = NULL;
 2405	int nr_types, i, vlen, err;
 2406	const struct btf_type *t;
 2407	const char *name;
 2408	Elf_Data *data;
 2409	Elf_Scn *scn;
 2410
 2411	if (obj->efile.btf_maps_shndx < 0)
 2412		return 0;
 2413
 2414	scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
 2415	data = elf_sec_data(obj, scn);
 2416	if (!scn || !data) {
 2417		pr_warn("elf: failed to get %s map definitions for %s\n",
 2418			MAPS_ELF_SEC, obj->path);
 2419		return -EINVAL;
 2420	}
 2421
 2422	nr_types = btf__get_nr_types(obj->btf);
 2423	for (i = 1; i <= nr_types; i++) {
 2424		t = btf__type_by_id(obj->btf, i);
 2425		if (!btf_is_datasec(t))
 2426			continue;
 2427		name = btf__name_by_offset(obj->btf, t->name_off);
 2428		if (strcmp(name, MAPS_ELF_SEC) == 0) {
 2429			sec = t;
 2430			obj->efile.btf_maps_sec_btf_id = i;
 2431			break;
 2432		}
 2433	}
 2434
 2435	if (!sec) {
 2436		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
 2437		return -ENOENT;
 2438	}
 2439
 2440	vlen = btf_vlen(sec);
 2441	for (i = 0; i < vlen; i++) {
 2442		err = bpf_object__init_user_btf_map(obj, sec, i,
 2443						    obj->efile.btf_maps_shndx,
 2444						    data, strict,
 2445						    pin_root_path);
 2446		if (err)
 2447			return err;
 2448	}
 2449
 2450	return 0;
 2451}
 2452
 2453static int bpf_object__init_maps(struct bpf_object *obj,
 2454				 const struct bpf_object_open_opts *opts)
 2455{
 2456	const char *pin_root_path;
 2457	bool strict;
 2458	int err;
 2459
 2460	strict = !OPTS_GET(opts, relaxed_maps, false);
 2461	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
 2462
 2463	err = bpf_object__init_user_maps(obj, strict);
 2464	err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
 2465	err = err ?: bpf_object__init_global_data_maps(obj);
 2466	err = err ?: bpf_object__init_kconfig_map(obj);
 2467	err = err ?: bpf_object__init_struct_ops_maps(obj);
 2468
 2469	return err;
 2470}
 2471
 2472static bool section_have_execinstr(struct bpf_object *obj, int idx)
 2473{
 2474	GElf_Shdr sh;
 2475
 2476	if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
 2477		return false;
 2478
 2479	return sh.sh_flags & SHF_EXECINSTR;
 2480}
 2481
 2482static bool btf_needs_sanitization(struct bpf_object *obj)
 2483{
 2484	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
 2485	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
 2486	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
 2487	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
 2488
 2489	return !has_func || !has_datasec || !has_func_global || !has_float;
 2490}
 2491
 2492static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 2493{
 2494	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
 2495	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
 2496	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
 2497	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
 2498	struct btf_type *t;
 2499	int i, j, vlen;
 2500
 2501	for (i = 1; i <= btf__get_nr_types(btf); i++) {
 2502		t = (struct btf_type *)btf__type_by_id(btf, i);
 2503
 2504		if (!has_datasec && btf_is_var(t)) {
 2505			/* replace VAR with INT */
 2506			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
 2507			/*
 2508			 * using size = 1 is the safest choice, 4 will be too
 2509			 * big and cause kernel BTF validation failure if
 2510			 * original variable took less than 4 bytes
 2511			 */
 2512			t->size = 1;
 2513			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
 2514		} else if (!has_datasec && btf_is_datasec(t)) {
 2515			/* replace DATASEC with STRUCT */
 2516			const struct btf_var_secinfo *v = btf_var_secinfos(t);
 2517			struct btf_member *m = btf_members(t);
 2518			struct btf_type *vt;
 2519			char *name;
 2520
 2521			name = (char *)btf__name_by_offset(btf, t->name_off);
 2522			while (*name) {
 2523				if (*name == '.')
 2524					*name = '_';
 2525				name++;
 2526			}
 2527
 2528			vlen = btf_vlen(t);
 2529			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
 2530			for (j = 0; j < vlen; j++, v++, m++) {
 2531				/* order of field assignments is important */
 2532				m->offset = v->offset * 8;
 2533				m->type = v->type;
 2534				/* preserve variable name as member name */
 2535				vt = (void *)btf__type_by_id(btf, v->type);
 2536				m->name_off = vt->name_off;
 2537			}
 2538		} else if (!has_func && btf_is_func_proto(t)) {
 2539			/* replace FUNC_PROTO with ENUM */
 2540			vlen = btf_vlen(t);
 2541			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
 2542			t->size = sizeof(__u32); /* kernel enforced */
 2543		} else if (!has_func && btf_is_func(t)) {
 2544			/* replace FUNC with TYPEDEF */
 2545			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
 2546		} else if (!has_func_global && btf_is_func(t)) {
 2547			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
 2548			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
 2549		} else if (!has_float && btf_is_float(t)) {
 2550			/* replace FLOAT with an equally-sized empty STRUCT;
 2551			 * since C compilers do not accept e.g. "float" as a
 2552			 * valid struct name, make it anonymous
 2553			 */
 2554			t->name_off = 0;
 2555			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
 2556		}
 2557	}
 2558}
 2559
 2560static bool libbpf_needs_btf(const struct bpf_object *obj)
 2561{
 2562	return obj->efile.btf_maps_shndx >= 0 ||
 2563	       obj->efile.st_ops_shndx >= 0 ||
 2564	       obj->nr_extern > 0;
 2565}
 2566
 2567static bool kernel_needs_btf(const struct bpf_object *obj)
 2568{
 2569	return obj->efile.st_ops_shndx >= 0;
 2570}
 2571
 2572static int bpf_object__init_btf(struct bpf_object *obj,
 2573				Elf_Data *btf_data,
 2574				Elf_Data *btf_ext_data)
 2575{
 2576	int err = -ENOENT;
 2577
 2578	if (btf_data) {
 2579		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
 2580		err = libbpf_get_error(obj->btf);
 2581		if (err) {
 2582			obj->btf = NULL;
 2583			pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
 2584			goto out;
 2585		}
 2586		/* enforce 8-byte pointers for BPF-targeted BTFs */
 2587		btf__set_pointer_size(obj->btf, 8);
 2588	}
 2589	if (btf_ext_data) {
 2590		if (!obj->btf) {
  2591			pr_debug("Ignoring ELF section %s because the ELF section %s it depends on is not found.\n",
 2592				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
 2593			goto out;
 2594		}
 2595		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
 2596		err = libbpf_get_error(obj->btf_ext);
 2597		if (err) {
  2598			pr_warn("Error loading ELF section %s: %d. Ignoring it and continuing.\n",
 2599				BTF_EXT_ELF_SEC, err);
 2600			obj->btf_ext = NULL;
 2601			goto out;
 2602		}
 2603	}
 2604out:
 2605	if (err && libbpf_needs_btf(obj)) {
 2606		pr_warn("BTF is required, but is missing or corrupted.\n");
 2607		return err;
 2608	}
 2609	return 0;
 2610}
 2611
 2612static int bpf_object__finalize_btf(struct bpf_object *obj)
 2613{
 2614	int err;
 2615
 2616	if (!obj->btf)
 2617		return 0;
 2618
 2619	err = btf__finalize_data(obj, obj->btf);
 2620	if (err) {
 2621		pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
 2622		return err;
 2623	}
 2624
 2625	return 0;
 2626}
 2627
 2628static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
 2629{
 2630	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
 2631	    prog->type == BPF_PROG_TYPE_LSM)
 2632		return true;
 2633
 2634	/* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
 2635	 * also need vmlinux BTF
 2636	 */
 2637	if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
 2638		return true;
 2639
 2640	return false;
 2641}
 2642
 2643static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
 2644{
 2645	struct bpf_program *prog;
 2646	int i;
 2647
 2648	/* CO-RE relocations need kernel BTF */
 2649	if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
 2650		return true;
 2651
 2652	/* Support for typed ksyms needs kernel BTF */
 2653	for (i = 0; i < obj->nr_extern; i++) {
 2654		const struct extern_desc *ext;
 2655
 2656		ext = &obj->externs[i];
 2657		if (ext->type == EXT_KSYM && ext->ksym.type_id)
 2658			return true;
 2659	}
 2660
 2661	bpf_object__for_each_program(prog, obj) {
 2662		if (!prog->load)
 2663			continue;
 2664		if (prog_needs_vmlinux_btf(prog))
 2665			return true;
 2666	}
 2667
 2668	return false;
 2669}
 2670
 2671static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
 2672{
 2673	int err;
 2674
 2675	/* btf_vmlinux could be loaded earlier */
 2676	if (obj->btf_vmlinux || obj->gen_loader)
 2677		return 0;
 2678
 2679	if (!force && !obj_needs_vmlinux_btf(obj))
 2680		return 0;
 2681
 2682	obj->btf_vmlinux = libbpf_find_kernel_btf();
 2683	err = libbpf_get_error(obj->btf_vmlinux);
 2684	if (err) {
 2685		pr_warn("Error loading vmlinux BTF: %d\n", err);
 2686		obj->btf_vmlinux = NULL;
 2687		return err;
 2688	}
 2689	return 0;
 2690}
 2691
 2692static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
 2693{
 2694	struct btf *kern_btf = obj->btf;
 2695	bool btf_mandatory, sanitize;
 2696	int i, err = 0;
 2697
 2698	if (!obj->btf)
 2699		return 0;
 2700
 2701	if (!kernel_supports(obj, FEAT_BTF)) {
 2702		if (kernel_needs_btf(obj)) {
 2703			err = -EOPNOTSUPP;
 2704			goto report;
 2705		}
 2706		pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
 2707		return 0;
 2708	}
 2709
  2710	/* Even though some subprogs are global/weak, the user might prefer the
  2711	 * more permissive BPF verification process that the BPF verifier
  2712	 * performs for static functions, taking into account more context from
  2713	 * the caller functions. In that case, the user needs to mark such
  2714	 * subprogs with __attribute__((visibility("hidden"))) and libbpf will
  2715	 * adjust the corresponding FUNC BTF type to be marked as static,
  2716	 * triggering the more involved BPF verification process.
  2717	 */
 2718	for (i = 0; i < obj->nr_programs; i++) {
 2719		struct bpf_program *prog = &obj->programs[i];
 2720		struct btf_type *t;
 2721		const char *name;
 2722		int j, n;
 2723
 2724		if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
 2725			continue;
 2726
 2727		n = btf__get_nr_types(obj->btf);
 2728		for (j = 1; j <= n; j++) {
 2729			t = btf_type_by_id(obj->btf, j);
 2730			if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
 2731				continue;
 2732
 2733			name = btf__str_by_offset(obj->btf, t->name_off);
 2734			if (strcmp(name, prog->name) != 0)
 2735				continue;
 2736
 2737			t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
 2738			break;
 2739		}
 2740	}
 2741
 2742	sanitize = btf_needs_sanitization(obj);
 2743	if (sanitize) {
 2744		const void *raw_data;
 2745		__u32 sz;
 2746
 2747		/* clone BTF to sanitize a copy and leave the original intact */
 2748		raw_data = btf__get_raw_data(obj->btf, &sz);
 2749		kern_btf = btf__new(raw_data, sz);
 2750		err = libbpf_get_error(kern_btf);
 2751		if (err)
 2752			return err;
 2753
 2754		/* enforce 8-byte pointers for BPF-targeted BTFs */
 2755		btf__set_pointer_size(obj->btf, 8);
 2756		bpf_object__sanitize_btf(obj, kern_btf);
 2757	}
 2758
 2759	if (obj->gen_loader) {
 2760		__u32 raw_size = 0;
 2761		const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
 2762
 2763		if (!raw_data)
 2764			return -ENOMEM;
 2765		bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
 2766		/* Pretend to have valid FD to pass various fd >= 0 checks.
 2767		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
 2768		 */
 2769		btf__set_fd(kern_btf, 0);
 2770	} else {
 2771		err = btf__load(kern_btf);
 2772	}
 2773	if (sanitize) {
 2774		if (!err) {
 2775			/* move fd to libbpf's BTF */
 2776			btf__set_fd(obj->btf, btf__fd(kern_btf));
 2777			btf__set_fd(kern_btf, -1);
 2778		}
 2779		btf__free(kern_btf);
 2780	}
 2781report:
 2782	if (err) {
 2783		btf_mandatory = kernel_needs_btf(obj);
 2784		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
 2785			btf_mandatory ? "BTF is mandatory, can't proceed."
 2786				      : "BTF is optional, ignoring.");
 2787		if (!btf_mandatory)
 2788			err = 0;
 2789	}
 2790	return err;
 2791}
 2792
 2793static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
 2794{
 2795	const char *name;
 2796
 2797	name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
 2798	if (!name) {
 2799		pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
 2800			off, obj->path, elf_errmsg(-1));
 2801		return NULL;
 2802	}
 2803
 2804	return name;
 2805}
 2806
 2807static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
 2808{
 2809	const char *name;
 2810
 2811	name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
 2812	if (!name) {
 2813		pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
 2814			off, obj->path, elf_errmsg(-1));
 2815		return NULL;
 2816	}
 2817
 2818	return name;
 2819}
 2820
 2821static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
 2822{
 2823	Elf_Scn *scn;
 2824
 2825	scn = elf_getscn(obj->efile.elf, idx);
 2826	if (!scn) {
 2827		pr_warn("elf: failed to get section(%zu) from %s: %s\n",
 2828			idx, obj->path, elf_errmsg(-1));
 2829		return NULL;
 2830	}
 2831	return scn;
 2832}
 2833
 2834static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
 2835{
 2836	Elf_Scn *scn = NULL;
 2837	Elf *elf = obj->efile.elf;
 2838	const char *sec_name;
 2839
 2840	while ((scn = elf_nextscn(elf, scn)) != NULL) {
 2841		sec_name = elf_sec_name(obj, scn);
 2842		if (!sec_name)
 2843			return NULL;
 2844
 2845		if (strcmp(sec_name, name) != 0)
 2846			continue;
 2847
 2848		return scn;
 2849	}
 2850	return NULL;
 2851}
 2852
 2853static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
 2854{
 2855	if (!scn)
 2856		return -EINVAL;
 2857
 2858	if (gelf_getshdr(scn, hdr) != hdr) {
 2859		pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
 2860			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
 2861		return -EINVAL;
 2862	}
 2863
 2864	return 0;
 2865}
 2866
 2867static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
 2868{
 2869	const char *name;
 2870	GElf_Shdr sh;
 2871
 2872	if (!scn)
 2873		return NULL;
 2874
 2875	if (elf_sec_hdr(obj, scn, &sh))
 2876		return NULL;
 2877
 2878	name = elf_sec_str(obj, sh.sh_name);
 2879	if (!name) {
 2880		pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
 2881			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
 2882		return NULL;
 2883	}
 2884
 2885	return name;
 2886}
 2887
 2888static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
 2889{
 2890	Elf_Data *data;
 2891
 2892	if (!scn)
 2893		return NULL;
 2894
 2895	data = elf_getdata(scn, 0);
 2896	if (!data) {
 2897		pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
 2898			elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
 2899			obj->path, elf_errmsg(-1));
 2900		return NULL;
 2901	}
 2902
 2903	return data;
 2904}
 2905
 2906static bool is_sec_name_dwarf(const char *name)
 2907{
 2908	/* approximation, but the actual list is too long */
 2909	return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
 2910}
 2911
 2912static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
 2913{
 2914	/* no special handling of .strtab */
 2915	if (hdr->sh_type == SHT_STRTAB)
 2916		return true;
 2917
 2918	/* ignore .llvm_addrsig section as well */
 2919	if (hdr->sh_type == SHT_LLVM_ADDRSIG)
 2920		return true;
 2921
 2922	/* no subprograms will lead to an empty .text section, ignore it */
 2923	if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
 2924	    strcmp(name, ".text") == 0)
 2925		return true;
 2926
 2927	/* DWARF sections */
 2928	if (is_sec_name_dwarf(name))
 2929		return true;
 2930
 2931	if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
 2932		name += sizeof(".rel") - 1;
 2933		/* DWARF section relocations */
 2934		if (is_sec_name_dwarf(name))
 2935			return true;
 2936
 2937		/* .BTF and .BTF.ext don't need relocations */
 2938		if (strcmp(name, BTF_ELF_SEC) == 0 ||
 2939		    strcmp(name, BTF_EXT_ELF_SEC) == 0)
 2940			return true;
 2941	}
 2942
 2943	return false;
 2944}
 2945
 2946static int cmp_progs(const void *_a, const void *_b)
 2947{
 2948	const struct bpf_program *a = _a;
 2949	const struct bpf_program *b = _b;
 2950
 2951	if (a->sec_idx != b->sec_idx)
 2952		return a->sec_idx < b->sec_idx ? -1 : 1;
 2953
 2954	/* sec_insn_off can't be the same within the section */
 2955	return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
 2956}
 2957
 2958static int bpf_object__elf_collect(struct bpf_object *obj)
 2959{
 2960	Elf *elf = obj->efile.elf;
 2961	Elf_Data *btf_ext_data = NULL;
 2962	Elf_Data *btf_data = NULL;
 2963	int idx = 0, err = 0;
 2964	const char *name;
 2965	Elf_Data *data;
 2966	Elf_Scn *scn;
 2967	GElf_Shdr sh;
 2968
 2969	/* a bunch of ELF parsing functionality depends on processing symbols,
 2970	 * so do the first pass and find the symbol table
 2971	 */
 2972	scn = NULL;
 2973	while ((scn = elf_nextscn(elf, scn)) != NULL) {
 2974		if (elf_sec_hdr(obj, scn, &sh))
 2975			return -LIBBPF_ERRNO__FORMAT;
 2976
 2977		if (sh.sh_type == SHT_SYMTAB) {
 2978			if (obj->efile.symbols) {
 2979				pr_warn("elf: multiple symbol tables in %s\n", obj->path);
 2980				return -LIBBPF_ERRNO__FORMAT;
 2981			}
 2982
 2983			data = elf_sec_data(obj, scn);
 2984			if (!data)
 2985				return -LIBBPF_ERRNO__FORMAT;
 2986
 2987			obj->efile.symbols = data;
 2988			obj->efile.symbols_shndx = elf_ndxscn(scn);
 2989			obj->efile.strtabidx = sh.sh_link;
 2990		}
 2991	}
 2992
 2993	scn = NULL;
 2994	while ((scn = elf_nextscn(elf, scn)) != NULL) {
 2995		idx++;
 2996
 2997		if (elf_sec_hdr(obj, scn, &sh))
 2998			return -LIBBPF_ERRNO__FORMAT;
 2999
 3000		name = elf_sec_str(obj, sh.sh_name);
 3001		if (!name)
 3002			return -LIBBPF_ERRNO__FORMAT;
 3003
 3004		if (ignore_elf_section(&sh, name))
 3005			continue;
 3006
 3007		data = elf_sec_data(obj, scn);
 3008		if (!data)
 3009			return -LIBBPF_ERRNO__FORMAT;
 3010
 3011		pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
 3012			 idx, name, (unsigned long)data->d_size,
 3013			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
 3014			 (int)sh.sh_type);
 3015
 3016		if (strcmp(name, "license") == 0) {
 3017			err = bpf_object__init_license(obj, data->d_buf, data->d_size);
 3018			if (err)
 3019				return err;
 3020		} else if (strcmp(name, "version") == 0) {
 3021			err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
 3022			if (err)
 3023				return err;
 3024		} else if (strcmp(name, "maps") == 0) {
 3025			obj->efile.maps_shndx = idx;
 3026		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
 3027			obj->efile.btf_maps_shndx = idx;
 3028		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
 3029			btf_data = data;
 3030		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
 3031			btf_ext_data = data;
 3032		} else if (sh.sh_type == SHT_SYMTAB) {
 3033			/* already processed during the first pass above */
 3034		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
 3035			if (sh.sh_flags & SHF_EXECINSTR) {
 3036				if (strcmp(name, ".text") == 0)
 3037					obj->efile.text_shndx = idx;
 3038				err = bpf_object__add_programs(obj, data, name, idx);
 3039				if (err)
 3040					return err;
 3041			} else if (strcmp(name, DATA_SEC) == 0) {
 3042				obj->efile.data = data;
 3043				obj->efile.data_shndx = idx;
 3044			} else if (strcmp(name, RODATA_SEC) == 0) {
 3045				obj->efile.rodata = data;
 3046				obj->efile.rodata_shndx = idx;
 3047			} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
 3048				obj->efile.st_ops_data = data;
 3049				obj->efile.st_ops_shndx = idx;
 3050			} else {
 3051				pr_info("elf: skipping unrecognized data section(%d) %s\n",
 3052					idx, name);
 3053			}
 3054		} else if (sh.sh_type == SHT_REL) {
 3055			int nr_sects = obj->efile.nr_reloc_sects;
 3056			void *sects = obj->efile.reloc_sects;
 3057			int sec = sh.sh_info; /* points to other section */
 3058
 3059			/* Only do relo for section with exec instructions */
 3060			if (!section_have_execinstr(obj, sec) &&
 3061			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
 3062			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
 3063				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
 3064					idx, name, sec,
 3065					elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
 3066				continue;
 3067			}
 3068
 3069			sects = libbpf_reallocarray(sects, nr_sects + 1,
 3070						    sizeof(*obj->efile.reloc_sects));
 3071			if (!sects)
 3072				return -ENOMEM;
 3073
 3074			obj->efile.reloc_sects = sects;
 3075			obj->efile.nr_reloc_sects++;
 3076
 3077			obj->efile.reloc_sects[nr_sects].shdr = sh;
 3078			obj->efile.reloc_sects[nr_sects].data = data;
 3079		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
 3080			obj->efile.bss = data;
 3081			obj->efile.bss_shndx = idx;
 3082		} else {
 3083			pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
 3084				(size_t)sh.sh_size);
 3085		}
 3086	}
 3087
 3088	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
 3089		pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
 3090		return -LIBBPF_ERRNO__FORMAT;
 3091	}
 3092
 3093	/* sort BPF programs by section index and in-section instruction offset
 3094	 * for faster search */
 3095	qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
 3096
 3097	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
 3098}
 3099
 3100static bool sym_is_extern(const GElf_Sym *sym)
 3101{
 3102	int bind = GELF_ST_BIND(sym->st_info);
 3103	/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
 3104	return sym->st_shndx == SHN_UNDEF &&
 3105	       (bind == STB_GLOBAL || bind == STB_WEAK) &&
 3106	       GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
 3107}
 3108
 3109static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx)
 3110{
 3111	int bind = GELF_ST_BIND(sym->st_info);
 3112	int type = GELF_ST_TYPE(sym->st_info);
 3113
 3114	/* in .text section */
 3115	if (sym->st_shndx != text_shndx)
 3116		return false;
 3117
 3118	/* local function */
 3119	if (bind == STB_LOCAL && type == STT_SECTION)
 3120		return true;
 3121
 3122	/* global function */
 3123	return bind == STB_GLOBAL && type == STT_FUNC;
 3124}
 3125
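/* Find the BTF type ID of an extern by name; the matching type has to be
 * a VAR or FUNC with extern linkage, otherwise -EINVAL is returned;
 * -ENOENT means no type with such name exists.
 */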
 3126static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
 3127{
 3128	const struct btf_type *t;
 3129	const char *tname;
 3130	int i, n;
 3131
 3132	if (!btf)
 3133		return -ESRCH;
 3134
 3135	n = btf__get_nr_types(btf);
 3136	for (i = 1; i <= n; i++) {
 3137		t = btf__type_by_id(btf, i);
 3138
 3139		if (!btf_is_var(t) && !btf_is_func(t))
 3140			continue;
 3141
 3142		tname = btf__name_by_offset(btf, t->name_off);
 3143		if (strcmp(tname, ext_name))
 3144			continue;
 3145
 3146		if (btf_is_var(t) &&
 3147		    btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
 3148			return -EINVAL;
 3149
 3150		if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
 3151			return -EINVAL;
 3152
 3153		return i;
 3154	}
 3155
 3156	return -ENOENT;
 3157}
 3158
 3159	static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
 3160	const struct btf_var_secinfo *vs;
 3161	const struct btf_type *t;
 3162	int i, j, n;
 3163
 3164	if (!btf)
 3165		return -ESRCH;
 3166
 3167	n = btf__get_nr_types(btf);
 3168	for (i = 1; i <= n; i++) {
 3169		t = btf__type_by_id(btf, i);
 3170
 3171		if (!btf_is_datasec(t))
 3172			continue;
 3173
 3174		vs = btf_var_secinfos(t);
 3175		for (j = 0; j < btf_vlen(t); j++, vs++) {
 3176			if (vs->type == ext_btf_id)
 3177				return i;
 3178		}
 3179	}
 3180
 3181	return -ENOENT;
 3182}
 3183
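/* Classify a Kconfig extern's BTF type into one of the supported kcfg
 * categories: bool, char, tristate (enum libbpf_tristate), an integer of
 * power-of-2 size up to 8 bytes, or a fixed-size char array. Everything
 * else is KCFG_UNKNOWN and is rejected by the caller.
 */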
 3184static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
 3185				     bool *is_signed)
 3186{
 3187	const struct btf_type *t;
 3188	const char *name;
 3189
 3190	t = skip_mods_and_typedefs(btf, id, NULL);
 3191	name = btf__name_by_offset(btf, t->name_off);
 3192
 3193	if (is_signed)
 3194		*is_signed = false;
 3195	switch (btf_kind(t)) {
 3196	case BTF_KIND_INT: {
 3197		int enc = btf_int_encoding(t);
 3198
 3199		if (enc & BTF_INT_BOOL)
 3200			return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
 3201		if (is_signed)
 3202			*is_signed = enc & BTF_INT_SIGNED;
 3203		if (t->size == 1)
 3204			return KCFG_CHAR;
 3205		if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
 3206			return KCFG_UNKNOWN;
 3207		return KCFG_INT;
 3208	}
 3209	case BTF_KIND_ENUM:
 3210		if (t->size != 4)
 3211			return KCFG_UNKNOWN;
 3212		if (strcmp(name, "libbpf_tristate"))
 3213			return KCFG_UNKNOWN;
 3214		return KCFG_TRISTATE;
 3215	case BTF_KIND_ARRAY:
 3216		if (btf_array(t)->nelems == 0)
 3217			return KCFG_UNKNOWN;
 3218		if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
 3219			return KCFG_UNKNOWN;
 3220		return KCFG_CHAR_ARR;
 3221	default:
 3222		return KCFG_UNKNOWN;
 3223	}
 3224}
 3225
 3226static int cmp_externs(const void *_a, const void *_b)
 3227{
 3228	const struct extern_desc *a = _a;
 3229	const struct extern_desc *b = _b;
 3230
 3231	if (a->type != b->type)
 3232		return a->type < b->type ? -1 : 1;
 3233
 3234	if (a->type == EXT_KCFG) {
 3235		/* descending order by alignment requirements */
 3236		if (a->kcfg.align != b->kcfg.align)
 3237			return a->kcfg.align > b->kcfg.align ? -1 : 1;
 3238		/* ascending order by size, within same alignment class */
 3239		if (a->kcfg.sz != b->kcfg.sz)
 3240			return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
 3241	}
 3242
 3243	/* resolve ties by name */
 3244	return strcmp(a->name, b->name);
 3245}
 3246
 3247static int find_int_btf_id(const struct btf *btf)
 3248{
 3249	const struct btf_type *t;
 3250	int i, n;
 3251
 3252	n = btf__get_nr_types(btf);
 3253	for (i = 1; i <= n; i++) {
 3254		t = btf__type_by_id(btf, i);
 3255
 3256		if (btf_is_int(t) && btf_int_bits(t) == 32)
 3257			return i;
 3258	}
 3259
 3260	return 0;
 3261}
 3262
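/* If the .ksyms DATASEC contains at least one extern function, add a dummy
 * int variable to BTF. Its ID is later used to replace the function's
 * var_secinfo entry (a DATASEC member has to be a variable) and its name
 * offset is reused for function proto parameters that lack a name.
 */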
 3263static int add_dummy_ksym_var(struct btf *btf)
 3264{
 3265	int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
 3266	const struct btf_var_secinfo *vs;
 3267	const struct btf_type *sec;
 3268
 3269	if (!btf)
 3270		return 0;
 3271
 3272	sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
 3273					    BTF_KIND_DATASEC);
 3274	if (sec_btf_id < 0)
 3275		return 0;
 3276
 3277	sec = btf__type_by_id(btf, sec_btf_id);
 3278	vs = btf_var_secinfos(sec);
 3279	for (i = 0; i < btf_vlen(sec); i++, vs++) {
 3280		const struct btf_type *vt;
 3281
 3282		vt = btf__type_by_id(btf, vs->type);
 3283		if (btf_is_func(vt))
 3284			break;
 3285	}
 3286
 3287	/* No func in ksyms sec.  No need to add dummy var. */
 3288	if (i == btf_vlen(sec))
 3289		return 0;
 3290
 3291	int_btf_id = find_int_btf_id(btf);
 3292	dummy_var_btf_id = btf__add_var(btf,
 3293					"dummy_ksym",
 3294					BTF_VAR_GLOBAL_ALLOCATED,
 3295					int_btf_id);
 3296	if (dummy_var_btf_id < 0)
 3297		pr_warn("cannot create a dummy_ksym var\n");
 3298
 3299	return dummy_var_btf_id;
 3300}
 3301
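/* Go over the symbol table and collect all undefined global/weak symbols
 * into obj->externs, validating each against its BTF description and
 * classifying it as either a Kconfig (.kconfig) or ksym (.ksyms) extern.
 */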
 3302static int bpf_object__collect_externs(struct bpf_object *obj)
 3303{
 3304	struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
 3305	const struct btf_type *t;
 3306	struct extern_desc *ext;
 3307	int i, n, off, dummy_var_btf_id;
 3308	const char *ext_name, *sec_name;
 3309	Elf_Scn *scn;
 3310	GElf_Shdr sh;
 3311
 3312	if (!obj->efile.symbols)
 3313		return 0;
 3314
 3315	scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
 3316	if (elf_sec_hdr(obj, scn, &sh))
 3317		return -LIBBPF_ERRNO__FORMAT;
 3318
 3319	dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
 3320	if (dummy_var_btf_id < 0)
 3321		return dummy_var_btf_id;
 3322
 3323	n = sh.sh_size / sh.sh_entsize;
 3324	pr_debug("looking for externs among %d symbols...\n", n);
 3325
 3326	for (i = 0; i < n; i++) {
 3327		GElf_Sym sym;
 3328
 3329		if (!gelf_getsym(obj->efile.symbols, i, &sym))
 3330			return -LIBBPF_ERRNO__FORMAT;
 3331		if (!sym_is_extern(&sym))
 3332			continue;
 3333		ext_name = elf_sym_str(obj, sym.st_name);
 3334		if (!ext_name || !ext_name[0])
 3335			continue;
 3336
 3337		ext = obj->externs;
 3338		ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
 3339		if (!ext)
 3340			return -ENOMEM;
 3341		obj->externs = ext;
 3342		ext = &ext[obj->nr_extern];
 3343		memset(ext, 0, sizeof(*ext));
 3344		obj->nr_extern++;
 3345
 3346		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
 3347		if (ext->btf_id <= 0) {
 3348			pr_warn("failed to find BTF for extern '%s': %d\n",
 3349				ext_name, ext->btf_id);
 3350			return ext->btf_id;
 3351		}
 3352		t = btf__type_by_id(obj->btf, ext->btf_id);
 3353		ext->name = btf__name_by_offset(obj->btf, t->name_off);
 3354		ext->sym_idx = i;
 3355		ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
 3356
 3357		ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
 3358		if (ext->sec_btf_id <= 0) {
 3359			pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
 3360				ext_name, ext->btf_id, ext->sec_btf_id);
 3361			return ext->sec_btf_id;
 3362		}
 3363		sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
 3364		sec_name = btf__name_by_offset(obj->btf, sec->name_off);
 3365
 3366		if (strcmp(sec_name, KCONFIG_SEC) == 0) {
 3367			if (btf_is_func(t)) {
 3368				pr_warn("extern function %s is unsupported under %s section\n",
 3369					ext->name, KCONFIG_SEC);
 3370				return -ENOTSUP;
 3371			}
 3372			kcfg_sec = sec;
 3373			ext->type = EXT_KCFG;
 3374			ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
 3375			if (ext->kcfg.sz <= 0) {
 3376				pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
 3377					ext_name, ext->kcfg.sz);
 3378				return ext->kcfg.sz;
 3379			}
 3380			ext->kcfg.align = btf__align_of(obj->btf, t->type);
 3381			if (ext->kcfg.align <= 0) {
 3382				pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
 3383					ext_name, ext->kcfg.align);
 3384				return -EINVAL;
 3385			}
 3386			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
 3387						        &ext->kcfg.is_signed);
 3388			if (ext->kcfg.type == KCFG_UNKNOWN) {
 3389				pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
 3390				return -ENOTSUP;
 3391			}
 3392		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
 3393			if (btf_is_func(t) && ext->is_weak) {
 3394				pr_warn("extern weak function %s is unsupported\n",
 3395					ext->name);
 3396				return -ENOTSUP;
 3397			}
 3398			ksym_sec = sec;
 3399			ext->type = EXT_KSYM;
 3400			skip_mods_and_typedefs(obj->btf, t->type,
 3401					       &ext->ksym.type_id);
 3402		} else {
 3403			pr_warn("unrecognized extern section '%s'\n", sec_name);
 3404			return -ENOTSUP;
 3405		}
 3406	}
 3407	pr_debug("collected %d externs total\n", obj->nr_extern);
 3408
 3409	if (!obj->nr_extern)
 3410		return 0;
 3411
 3412	/* sort externs by type, for kcfg ones also by (align, size, name) */
 3413	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
 3414
 3415	/* for .ksyms section, we need to turn all externs into allocated
 3416	 * variables in BTF to pass kernel verification; we do this by
 3417	 * pretending that each extern is an int-sized (4-byte) variable
 3418	 */
 3419	if (ksym_sec) {
 3420		/* find existing 4-byte integer type in BTF to use for fake
 3421		 * extern variables in DATASEC
 3422		 */
 3423		int int_btf_id = find_int_btf_id(obj->btf);
 3424		/* For extern function, a dummy_var added earlier
 3425		 * will be used to replace the vs->type and
 3426		 * its name string will be used to refill
 3427		 * the missing param's name.
 3428		 */
 3429		const struct btf_type *dummy_var;
 3430
 3431		dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
 3432		for (i = 0; i < obj->nr_extern; i++) {
 3433			ext = &obj->externs[i];
 3434			if (ext->type != EXT_KSYM)
 3435				continue;
 3436			pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
 3437				 i, ext->sym_idx, ext->name);
 3438		}
 3439
 3440		sec = ksym_sec;
 3441		n = btf_vlen(sec);
 3442		for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
 3443			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
 3444			struct btf_type *vt;
 3445
 3446			vt = (void *)btf__type_by_id(obj->btf, vs->type);
 3447			ext_name = btf__name_by_offset(obj->btf, vt->name_off);
 3448			ext = find_extern_by_name(obj, ext_name);
 3449			if (!ext) {
 3450				pr_warn("failed to find extern definition for BTF %s '%s'\n",
 3451					btf_kind_str(vt), ext_name);
 3452				return -ESRCH;
 3453			}
 3454			if (btf_is_func(vt)) {
 3455				const struct btf_type *func_proto;
 3456				struct btf_param *param;
 3457				int j;
 3458
 3459				func_proto = btf__type_by_id(obj->btf,
 3460							     vt->type);
 3461				param = btf_params(func_proto);
 3462				/* Reuse the dummy_var string if the
 3463				 * func proto does not have param name.
 3464				 */
 3465				for (j = 0; j < btf_vlen(func_proto); j++)
 3466					if (param[j].type && !param[j].name_off)
 3467						param[j].name_off =
 3468							dummy_var->name_off;
 3469				vs->type = dummy_var_btf_id;
 3470				vt->info &= ~0xffff;
 3471				vt->info |= BTF_FUNC_GLOBAL;
 3472			} else {
 3473				btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
 3474				vt->type = int_btf_id;
 3475			}
 3476			vs->offset = off;
 3477			vs->size = sizeof(int);
 3478		}
 3479		sec->size = off;
 3480	}
 3481
 3482	if (kcfg_sec) {
 3483		sec = kcfg_sec;
 3484		/* for kcfg externs calculate their offsets within a .kconfig map */
 3485		off = 0;
 3486		for (i = 0; i < obj->nr_extern; i++) {
 3487			ext = &obj->externs[i];
 3488			if (ext->type != EXT_KCFG)
 3489				continue;
 3490
 3491			ext->kcfg.data_off = roundup(off, ext->kcfg.align);
 3492			off = ext->kcfg.data_off + ext->kcfg.sz;
 3493			pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
 3494				 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
 3495		}
 3496		sec->size = off;
 3497		n = btf_vlen(sec);
 3498		for (i = 0; i < n; i++) {
 3499			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
 3500
 3501			t = btf__type_by_id(obj->btf, vs->type);
 3502			ext_name = btf__name_by_offset(obj->btf, t->name_off);
 3503			ext = find_extern_by_name(obj, ext_name);
 3504			if (!ext) {
 3505				pr_warn("failed to find extern definition for BTF var '%s'\n",
 3506					ext_name);
 3507				return -ESRCH;
 3508			}
 3509			btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
 3510			vs->offset = ext->kcfg.data_off;
 3511		}
 3512	}
 3513	return 0;
 3514}
 3515
 3516struct bpf_program *
 3517bpf_object__find_program_by_title(const struct bpf_object *obj,
 3518				  const char *title)
 3519{
 3520	struct bpf_program *pos;
 3521
 3522	bpf_object__for_each_program(pos, obj) {
 3523		if (pos->sec_name && !strcmp(pos->sec_name, title))
 3524			return pos;
 3525	}
 3526	return errno = ENOENT, NULL;
 3527}
 3528
 3529static bool prog_is_subprog(const struct bpf_object *obj,
 3530			    const struct bpf_program *prog)
 3531{
 3532	/* For legacy reasons, libbpf supports entry-point BPF programs
 3533	 * without SEC() attribute, i.e., those in the .text section. But if
 3534	 * there are 2 or more such programs in the .text section, they all
 3535	 * must be subprograms called from entry-point BPF programs in
 3536	 * designated SEC()'tions, otherwise there is no way to distinguish
 3537	 * which of those programs should be loaded vs which are subprograms.
 3538	 * Similarly, if there is a function/program in .text and at least one
 3539	 * other BPF program with custom SEC() attribute, then we just assume
 3540	 * .text programs are subprograms (even if they are not called from
 3541	 * other programs), because libbpf never explicitly supported mixing
 3542	 * SEC()-designated BPF programs and .text entry-point BPF programs.
 3543	 */
 3544	return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
 3545}
 3546
 3547struct bpf_program *
 3548bpf_object__find_program_by_name(const struct bpf_object *obj,
 3549				 const char *name)
 3550{
 3551	struct bpf_program *prog;
 3552
 3553	bpf_object__for_each_program(prog, obj) {
 3554		if (prog_is_subprog(obj, prog))
 3555			continue;
 3556		if (!strcmp(prog->name, name))
 3557			return prog;
 3558	}
 3559	return errno = ENOENT, NULL;
 3560}
 3561
 3562static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
 3563				      int shndx)
 3564{
 3565	return shndx == obj->efile.data_shndx ||
 3566	       shndx == obj->efile.bss_shndx ||
 3567	       shndx == obj->efile.rodata_shndx;
 3568}
 3569
 3570static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
 3571				      int shndx)
 3572{
 3573	return shndx == obj->efile.maps_shndx ||
 3574	       shndx == obj->efile.btf_maps_shndx;
 3575}
 3576
 3577static enum libbpf_map_type
 3578bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
 3579{
 3580	if (shndx == obj->efile.data_shndx)
 3581		return LIBBPF_MAP_DATA;
 3582	else if (shndx == obj->efile.bss_shndx)
 3583		return LIBBPF_MAP_BSS;
 3584	else if (shndx == obj->efile.rodata_shndx)
 3585		return LIBBPF_MAP_RODATA;
 3586	else if (shndx == obj->efile.symbols_shndx)
 3587		return LIBBPF_MAP_KCONFIG;
 3588	else
 3589		return LIBBPF_MAP_UNSPEC;
 3590}
 3591
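/* Record a single relocation in prog's reloc_desc array, classifying it as
 * an extern (func or var) reference, a subprog call, a subprog address load,
 * a generic map reference, or a global data (map) reference.
 */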
 3592static int bpf_program__record_reloc(struct bpf_program *prog,
 3593				     struct reloc_desc *reloc_desc,
 3594				     __u32 insn_idx, const char *sym_name,
 3595				     const GElf_Sym *sym, const GElf_Rel *rel)
 3596{
 3597	struct bpf_insn *insn = &prog->insns[insn_idx];
 3598	size_t map_idx, nr_maps = prog->obj->nr_maps;
 3599	struct bpf_object *obj = prog->obj;
 3600	__u32 shdr_idx = sym->st_shndx;
 3601	enum libbpf_map_type type;
 3602	const char *sym_sec_name;
 3603	struct bpf_map *map;
 3604
 3605	if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
 3606		pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
 3607			prog->name, sym_name, insn_idx, insn->code);
 3608		return -LIBBPF_ERRNO__RELOC;
 3609	}
 3610
 3611	if (sym_is_extern(sym)) {
 3612		int sym_idx = GELF_R_SYM(rel->r_info);
 3613		int i, n = obj->nr_extern;
 3614		struct extern_desc *ext;
 3615
 3616		for (i = 0; i < n; i++) {
 3617			ext = &obj->externs[i];
 3618			if (ext->sym_idx == sym_idx)
 3619				break;
 3620		}
 3621		if (i >= n) {
 3622			pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
 3623				prog->name, sym_name, sym_idx);
 3624			return -LIBBPF_ERRNO__RELOC;
 3625		}
 3626		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
 3627			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
 3628		if (insn->code == (BPF_JMP | BPF_CALL))
 3629			reloc_desc->type = RELO_EXTERN_FUNC;
 3630		else
 3631			reloc_desc->type = RELO_EXTERN_VAR;
 3632		reloc_desc->insn_idx = insn_idx;
 3633		reloc_desc->sym_off = i; /* sym_off stores extern index */
 3634		return 0;
 3635	}
 3636
 3637	/* sub-program call relocation */
 3638	if (is_call_insn(insn)) {
 3639		if (insn->src_reg != BPF_PSEUDO_CALL) {
 3640			pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
 3641			return -LIBBPF_ERRNO__RELOC;
 3642		}
 3643		/* text_shndx can be 0, if no default "main" program exists */
 3644		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
 3645			sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
 3646			pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
 3647				prog->name, sym_name, sym_sec_name);
 3648			return -LIBBPF_ERRNO__RELOC;
 3649		}
 3650		if (sym->st_value % BPF_INSN_SZ) {
 3651			pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
 3652				prog->name, sym_name, (size_t)sym->st_value);
 3653			return -LIBBPF_ERRNO__RELOC;
 3654		}
 3655		reloc_desc->type = RELO_CALL;
 3656		reloc_desc->insn_idx = insn_idx;
 3657		reloc_desc->sym_off = sym->st_value;
 3658		return 0;
 3659	}
 3660
 3661	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
 3662		pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
 3663			prog->name, sym_name, shdr_idx);
 3664		return -LIBBPF_ERRNO__RELOC;
 3665	}
 3666
 3667	/* loading subprog addresses */
 3668	if (sym_is_subprog(sym, obj->efile.text_shndx)) {
 3669		/* global_func: sym->st_value = offset in the section, insn->imm = 0.
 3670		 * local_func: sym->st_value = 0, insn->imm = offset in the section.
 3671		 */
 3672		if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
 3673			pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
 3674				prog->name, sym_name, (size_t)sym->st_value, insn->imm);
 3675			return -LIBBPF_ERRNO__RELOC;
 3676		}
 3677
 3678		reloc_desc->type = RELO_SUBPROG_ADDR;
 3679		reloc_desc->insn_idx = insn_idx;
 3680		reloc_desc->sym_off = sym->st_value;
 3681		return 0;
 3682	}
 3683
 3684	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
 3685	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
 3686
 3687	/* generic map reference relocation */
 3688	if (type == LIBBPF_MAP_UNSPEC) {
 3689		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
 3690			pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
 3691				prog->name, sym_name, sym_sec_name);
 3692			return -LIBBPF_ERRNO__RELOC;
 3693		}
 3694		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
 3695			map = &obj->maps[map_idx];
 3696			if (map->libbpf_type != type ||
 3697			    map->sec_idx != sym->st_shndx ||
 3698			    map->sec_offset != sym->st_value)
 3699				continue;
 3700			pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
 3701				 prog->name, map_idx, map->name, map->sec_idx,
 3702				 map->sec_offset, insn_idx);
 3703			break;
 3704		}
 3705		if (map_idx >= nr_maps) {
 3706			pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
 3707				prog->name, sym_sec_name, (size_t)sym->st_value);
 3708			return -LIBBPF_ERRNO__RELOC;
 3709		}
 3710		reloc_desc->type = RELO_LD64;
 3711		reloc_desc->insn_idx = insn_idx;
 3712		reloc_desc->map_idx = map_idx;
 3713		reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
 3714		return 0;
 3715	}
 3716
 3717	/* global data map relocation */
 3718	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
 3719		pr_warn("prog '%s': bad data relo against section '%s'\n",
 3720			prog->name, sym_sec_name);
 3721		return -LIBBPF_ERRNO__RELOC;
 3722	}
 3723	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
 3724		map = &obj->maps[map_idx];
 3725		if (map->libbpf_type != type)
 3726			continue;
 3727		pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
 3728			 prog->name, map_idx, map->name, map->sec_idx,
 3729			 map->sec_offset, insn_idx);
 3730		break;
 3731	}
 3732	if (map_idx >= nr_maps) {
 3733		pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
 3734			prog->name, sym_sec_name);
 3735		return -LIBBPF_ERRNO__RELOC;
 3736	}
 3737
 3738	reloc_desc->type = RELO_DATA;
 3739	reloc_desc->insn_idx = insn_idx;
 3740	reloc_desc->map_idx = map_idx;
 3741	reloc_desc->sym_off = sym->st_value;
 3742	return 0;
 3743}
 3744
 3745static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
 3746{
 3747	return insn_idx >= prog->sec_insn_off &&
 3748	       insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
 3749}
 3750
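/* Binary search across programs, which are sorted by (sec_idx, sec_insn_off)
 * by cmp_progs(), for the program containing given (section, instruction
 * offset) pair; returns NULL if no such program exists.
 */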
 3751static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
 3752						 size_t sec_idx, size_t insn_idx)
 3753{
 3754	int l = 0, r = obj->nr_programs - 1, m;
 3755	struct bpf_program *prog;
 3756
 3757	while (l < r) {
 3758		m = l + (r - l + 1) / 2;
 3759		prog = &obj->programs[m];
 3760
 3761		if (prog->sec_idx < sec_idx ||
 3762		    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
 3763			l = m;
 3764		else
 3765			r = m - 1;
 3766	}
 3767	/* matching program could be at index l, but it still might be the
 3768	 * wrong one, so we need to double check conditions for the last time
 3769	 */
 3770	prog = &obj->programs[l];
 3771	if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
 3772		return prog;
 3773	return NULL;
 3774}
 3775
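/* Process a single .rel<sec> section: for each relocation, resolve its
 * symbol, find the BPF program containing the relocated instruction, and
 * record the relocation (with insn_idx adjusted to be program-local) in
 * that program's reloc_desc array via bpf_program__record_reloc().
 */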
 3776static int
 3777bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
 3778{
 3779	Elf_Data *symbols = obj->efile.symbols;
 3780	const char *relo_sec_name, *sec_name;
 3781	size_t sec_idx = shdr->sh_info;
 3782	struct bpf_program *prog;
 3783	struct reloc_desc *relos;
 3784	int err, i, nrels;
 3785	const char *sym_name;
 3786	__u32 insn_idx;
 3787	Elf_Scn *scn;
 3788	Elf_Data *scn_data;
 3789	GElf_Sym sym;
 3790	GElf_Rel rel;
 3791
 3792	scn = elf_sec_by_idx(obj, sec_idx);
 3793	scn_data = elf_sec_data(obj, scn);
 3794
 3795	relo_sec_name = elf_sec_str(obj, shdr->sh_name);
 3796	sec_name = elf_sec_name(obj, scn);
 3797	if (!relo_sec_name || !sec_name)
 3798		return -EINVAL;
 3799
 3800	pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
 3801		 relo_sec_name, sec_idx, sec_name);
 3802	nrels = shdr->sh_size / shdr->sh_entsize;
 3803
 3804	for (i = 0; i < nrels; i++) {
 3805		if (!gelf_getrel(data, i, &rel)) {
 3806			pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
 3807			return -LIBBPF_ERRNO__FORMAT;
 3808		}
 3809		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
 3810			pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
 3811				relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
 3812			return -LIBBPF_ERRNO__FORMAT;
 3813		}
 3814
 3815		if (rel.r_offset % BPF_INSN_SZ || rel.r_offset >= scn_data->d_size) {
 3816			pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
 3817				relo_sec_name, (size_t)rel.r_offset, i);
 3818			return -LIBBPF_ERRNO__FORMAT;
 3819		}
 3820
 3821		insn_idx = rel.r_offset / BPF_INSN_SZ;
 3822		/* relocations against static functions are recorded as
 3823		 * relocations against the section that contains a function;
 3824		 * in such case, symbol will be STT_SECTION and sym.st_name
 3825		 * will point to empty string (0), so fetch section name
 3826		 * instead
 3827		 */
 3828		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
 3829			sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
 3830		else
 3831			sym_name = elf_sym_str(obj, sym.st_name);
 3832		sym_name = sym_name ?: "<?";
 3833
 3834		pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
 3835			 relo_sec_name, i, insn_idx, sym_name);
 3836
 3837		prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
 3838		if (!prog) {
 3839			pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
 3840				relo_sec_name, i, sec_name, insn_idx);
 3841			continue;
 3842		}
 3843
 3844		relos = libbpf_reallocarray(prog->reloc_desc,
 3845					    prog->nr_reloc + 1, sizeof(*relos));
 3846		if (!relos)
 3847			return -ENOMEM;
 3848		prog->reloc_desc = relos;
 3849
 3850		/* adjust insn_idx to local BPF program frame of reference */
 3851		insn_idx -= prog->sec_insn_off;
 3852		err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
 3853						insn_idx, sym_name, &sym, &rel);
 3854		if (err)
 3855			return err;
 3856
 3857		prog->nr_reloc++;
 3858	}
 3859	return 0;
 3860}
 3861
 3862static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
 3863{
 3864	struct bpf_map_def *def = &map->def;
 3865	__u32 key_type_id = 0, value_type_id = 0;
 3866	int ret;
 3867
 3868	/* if it's BTF-defined map, we don't need to search for type IDs.
 3869	 * For struct_ops map, it does not need btf_key_type_id and
 3870	 * btf_value_type_id.
 3871	 */
 3872	if (map->sec_idx == obj->efile.btf_maps_shndx ||
 3873	    bpf_map__is_struct_ops(map))
 3874		return 0;
 3875
 3876	if (!bpf_map__is_internal(map)) {
 3877		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
 3878					   def->value_size, &key_type_id,
 3879					   &value_type_id);
 3880	} else {
 3881		/*
 3882		 * LLVM annotates global data differently in BTF, that is,
 3883		 * only as '.data', '.bss' or '.rodata'.
 3884		 */
 3885		ret = btf__find_by_name(obj->btf,
 3886				libbpf_type_to_btf_name[map->libbpf_type]);
 3887	}
 3888	if (ret < 0)
 3889		return ret;
 3890
 3891	map->btf_key_type_id = key_type_id;
 3892	map->btf_value_type_id = bpf_map__is_internal(map) ?
 3893				 ret : value_type_id;
 3894	return 0;
 3895}
 3896
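/* Fallback for kernels without BPF_OBJ_GET_INFO_BY_FD support: reconstruct
 * basic map parameters by parsing /proc/<pid>/fdinfo/<fd> of the map FD.
 */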
 3897static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
 3898{
 3899	char file[PATH_MAX], buff[4096];
 3900	FILE *fp;
 3901	__u32 val;
 3902	int err;
 3903
 3904	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
 3905	memset(info, 0, sizeof(*info));
 3906
 3907	fp = fopen(file, "r");
 3908	if (!fp) {
 3909		err = -errno;
 3910		pr_warn("failed to open %s: %d. No procfs support?\n", file,
 3911			err);
 3912		return err;
 3913	}
 3914
 3915	while (fgets(buff, sizeof(buff), fp)) {
 3916		if (sscanf(buff, "map_type:\t%u", &val) == 1)
 3917			info->type = val;
 3918		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
 3919			info->key_size = val;
 3920		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
 3921			info->value_size = val;
 3922		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
 3923			info->max_entries = val;
 3924		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
 3925			info->map_flags = val;
 3926	}
 3927
 3928	fclose(fp);
 3929
 3930	return 0;
 3931}
 3932
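/* Make bpf_map use an already created map's FD instead of creating a new
 * one: dup the FD with O_CLOEXEC, close map's own FD, and adopt the map
 * definition and BTF type IDs reported by the kernel. A reused map is
 * skipped during map creation on object load.
 */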
 3933int bpf_map__reuse_fd(struct bpf_map *map, int fd)
 3934{
 3935	struct bpf_map_info info = {};
 3936	__u32 len = sizeof(info);
 3937	int new_fd, err;
 3938	char *new_name;
 3939
 3940	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 3941	if (err && errno == EINVAL)
 3942		err = bpf_get_map_info_from_fdinfo(fd, &info);
 3943	if (err)
 3944		return libbpf_err(err);
 3945
 3946	new_name = strdup(info.name);
 3947	if (!new_name)
 3948		return libbpf_err(-errno);
 3949
 3950	new_fd = open("/", O_RDONLY | O_CLOEXEC);
 3951	if (new_fd < 0) {
 3952		err = -errno;
 3953		goto err_free_new_name;
 3954	}
 3955
 3956	new_fd = dup3(fd, new_fd, O_CLOEXEC);
 3957	if (new_fd < 0) {
 3958		err = -errno;
 3959		goto err_close_new_fd;
 3960	}
 3961
 3962	err = zclose(map->fd);
 3963	if (err) {
 3964		err = -errno;
 3965		goto err_close_new_fd;
 3966	}
 3967	free(map->name);
 3968
 3969	map->fd = new_fd;
 3970	map->name = new_name;
 3971	map->def.type = info.type;
 3972	map->def.key_size = info.key_size;
 3973	map->def.value_size = info.value_size;
 3974	map->def.max_entries = info.max_entries;
 3975	map->def.map_flags = info.map_flags;
 3976	map->btf_key_type_id = info.btf_key_type_id;
 3977	map->btf_value_type_id = info.btf_value_type_id;
 3978	map->reused = true;
 3979
 3980	return 0;
 3981
 3982err_close_new_fd:
 3983	close(new_fd);
 3984err_free_new_name:
 3985	free(new_name);
 3986	return libbpf_err(err);
 3987}
 3988
 3989__u32 bpf_map__max_entries(const struct bpf_map *map)
 3990{
 3991	return map->def.max_entries;
 3992}
 3993
 3994struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
 3995{
 3996	if (!bpf_map_type__is_map_in_map(map->def.type))
 3997		return errno = EINVAL, NULL;
 3998
 3999	return map->inner_map;
 4000}
 4001
 4002int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
 4003{
 4004	if (map->fd >= 0)
 4005		return libbpf_err(-EBUSY);
 4006	map->def.max_entries = max_entries;
 4007	return 0;
 4008}
 4009
 4010int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
 4011{
 4012	if (!map || !max_entries)
 4013		return libbpf_err(-EINVAL);
 4014
 4015	return bpf_map__set_max_entries(map, max_entries);
 4016}
 4017
 4018static int
 4019bpf_object__probe_loading(struct bpf_object *obj)
 4020{
 4021	struct bpf_load_program_attr attr;
 4022	char *cp, errmsg[STRERR_BUFSIZE];
 4023	struct bpf_insn insns[] = {
 4024		BPF_MOV64_IMM(BPF_REG_0, 0),
 4025		BPF_EXIT_INSN(),
 4026	};
 4027	int ret;
 4028
 4029	if (obj->gen_loader)
 4030		return 0;
 4031
 4032	/* make sure basic loading works */
 4033
 4034	memset(&attr, 0, sizeof(attr));
 4035	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 4036	attr.insns = insns;
 4037	attr.insns_cnt = ARRAY_SIZE(insns);
 4038	attr.license = "GPL";
 4039
 4040	ret = bpf_load_program_xattr(&attr, NULL, 0);
 4041	if (ret < 0) {
 4042		attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
 4043		ret = bpf_load_program_xattr(&attr, NULL, 0);
 4044	}
 4045	if (ret < 0) {
 4046		ret = errno;
 4047		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
 4048		pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
 4049			"program. Make sure your kernel supports BPF "
 4050			"(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
 4051			"set to a big enough value.\n", __func__, cp, ret);
 4052		return -ret;
 4053	}
 4054	close(ret);
 4055
 4056	return 0;
 4057}
 4058
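/* Helper to turn a probe's resulting FD into a yes/no answer: a valid FD
 * means the feature is supported (the FD itself is just closed).
 */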
 4059static int probe_fd(int fd)
 4060{
 4061	if (fd >= 0)
 4062		close(fd);
 4063	return fd >= 0;
 4064}
 4065
 4066static int probe_kern_prog_name(void)
 4067{
 4068	struct bpf_load_program_attr attr;
 4069	struct bpf_insn insns[] = {
 4070		BPF_MOV64_IMM(BPF_REG_0, 0),
 4071		BPF_EXIT_INSN(),
 4072	};
 4073	int ret;
 4074
 4075	/* make sure loading with name works */
 4076
 4077	memset(&attr, 0, sizeof(attr));
 4078	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 4079	attr.insns = insns;
 4080	attr.insns_cnt = ARRAY_SIZE(insns);
 4081	attr.license = "GPL";
 4082	attr.name = "test";
 4083	ret = bpf_load_program_xattr(&attr, NULL, 0);
 4084	return probe_fd(ret);
 4085}
 4086
 4087static int probe_kern_global_data(void)
 4088{
 4089	struct bpf_load_program_attr prg_attr;
 4090	struct bpf_create_map_attr map_attr;
 4091	char *cp, errmsg[STRERR_BUFSIZE];
 4092	struct bpf_insn insns[] = {
 4093		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
 4094		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
 4095		BPF_MOV64_IMM(BPF_REG_0, 0),
 4096		BPF_EXIT_INSN(),
 4097	};
 4098	int ret, map;
 4099
 4100	memset(&map_attr, 0, sizeof(map_attr));
 4101	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
 4102	map_attr.key_size = sizeof(int);
 4103	map_attr.value_size = 32;
 4104	map_attr.max_entries = 1;
 4105
 4106	map = bpf_create_map_xattr(&map_attr);
 4107	if (map < 0) {
 4108		ret = -errno;
 4109		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
 4110		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
 4111			__func__, cp, -ret);
 4112		return ret;
 4113	}
 4114
 4115	insns[0].imm = map;
 4116
 4117	memset(&prg_attr, 0, sizeof(prg_attr));
 4118	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 4119	prg_attr.insns = insns;
 4120	prg_attr.insns_cnt = ARRAY_SIZE(insns);
 4121	prg_attr.license = "GPL";
 4122
 4123	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
 4124	close(map);
 4125	return probe_fd(ret);
 4126}
 4127
 4128static int probe_kern_btf(void)
 4129{
 4130	static const char strs[] = "\0int";
 4131	__u32 types[] = {
 4132		/* int */
 4133		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
 4134	};
 4135
 4136	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
 4137					     strs, sizeof(strs)));
 4138}
 4139
 4140static int probe_kern_btf_func(void)
 4141{
 4142	static const char strs[] = "\0int\0x\0a";
 4143	/* void x(int a) {} */
 4144	__u32 types[] = {
 4145		/* int */
 4146		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
 4147		/* FUNC_PROTO */                                /* [2] */
 4148		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
 4149		BTF_PARAM_ENC(7, 1),
 4150		/* FUNC x */                                    /* [3] */
 4151		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
 4152	};
 4153
 4154	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
 4155					     strs, sizeof(strs)));
 4156}
 4157
 4158static int probe_kern_btf_func_global(void)
 4159{
 4160	static const char strs[] = "\0int\0x\0a";
 4161	/* global func: void x(int a) {} */
 4162	__u32 types[] = {
 4163		/* int */
 4164		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
 4165		/* FUNC_PROTO */                                /* [2] */
 4166		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
 4167		BTF_PARAM_ENC(7, 1),
 4168		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
 4169		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
 4170	};
 4171
 4172	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
 4173					     strs, sizeof(strs)));
 4174}
 4175
 4176static int probe_kern_btf_datasec(void)
 4177{
 4178	static const char strs[] = "\0x\0.data";
 4179	/* static int x; */
 4180	__u32 types[] = {
 4181		/* int */
 4182		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
 4183		/* VAR x */                                     /* [2] */
 4184		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
 4185		BTF_VAR_STATIC,
 4186		/* DATASEC val */                               /* [3] */
 4187		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
 4188		BTF_VAR_SECINFO_ENC(2, 0, 4),
 4189	};
 4190
 4191	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
 4192					     strs, sizeof(strs)));
 4193}
 4194
 4195static int probe_kern_btf_float(void)
 4196{
 4197	static const char strs[] = "\0float";
 4198	__u32 types[] = {
 4199		/* float */
 4200		BTF_TYPE_FLOAT_ENC(1, 4),
 4201	};
 4202
 4203	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
 4204					     strs, sizeof(strs)));
 4205}
 4206
 4207static int probe_kern_array_mmap(void)
 4208{
 4209	struct bpf_create_map_attr attr = {
 4210		.map_type = BPF_MAP_TYPE_ARRAY,
 4211		.map_flags = BPF_F_MMAPABLE,
 4212		.key_size = sizeof(int),
 4213		.value_size = sizeof(int),
 4214		.max_entries = 1,
 4215	};
 4216
 4217	return probe_fd(bpf_create_map_xattr(&attr));
 4218}
 4219
 4220static int probe_kern_exp_attach_type(void)
 4221{
 4222	struct bpf_load_program_attr attr;
 4223	struct bpf_insn insns[] = {
 4224		BPF_MOV64_IMM(BPF_REG_0, 0),
 4225		BPF_EXIT_INSN(),
 4226	};
 4227
 4228	memset(&attr, 0, sizeof(attr));
 4229	/* use any valid combination of program type and (optional)
 4230	 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
 4231	 * to see if kernel supports expected_attach_type field for
 4232	 * BPF_PROG_LOAD command
 4233	 */
 4234	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
 4235	attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
 4236	attr.insns = insns;
 4237	attr.insns_cnt = ARRAY_SIZE(insns);
 4238	attr.license = "GPL";
 4239
 4240	return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
 4241}
 4242
 4243static int probe_kern_probe_read_kernel(void)
 4244{
 4245	struct bpf_load_program_attr attr;
 4246	struct bpf_insn insns[] = {
 4247		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
 4248		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
 4249		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
 4250		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
 4251		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
 4252		BPF_EXIT_INSN(),
 4253	};
 4254
 4255	memset(&attr, 0, sizeof(attr));
 4256	attr.prog_type = BPF_PROG_TYPE_KPROBE;
 4257	attr.insns = insns;
 4258	attr.insns_cnt = ARRAY_SIZE(insns);
 4259	attr.license = "GPL";
 4260
 4261	return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
 4262}
 4263
 4264static int probe_prog_bind_map(void)
 4265{
 4266	struct bpf_load_program_attr prg_attr;
 4267	struct bpf_create_map_attr map_attr;
 4268	char *cp, errmsg[STRERR_BUFSIZE];
 4269	struct bpf_insn insns[] = {
 4270		BPF_MOV64_IMM(BPF_REG_0, 0),
 4271		BPF_EXIT_INSN(),
 4272	};
 4273	int ret, map, prog;
 4274
 4275	memset(&map_attr, 0, sizeof(map_attr));
 4276	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
 4277	map_attr.key_size = sizeof(int);
 4278	map_attr.value_size = 32;
 4279	map_attr.max_entries = 1;
 4280
 4281	map = bpf_create_map_xattr(&map_attr);
 4282	if (map < 0) {
 4283		ret = -errno;
 4284		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
 4285		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
 4286			__func__, cp, -ret);
 4287		return ret;
 4288	}
 4289
 4290	memset(&prg_attr, 0, sizeof(prg_attr));
 4291	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 4292	prg_attr.insns = insns;
 4293	prg_attr.insns_cnt = ARRAY_SIZE(insns);
 4294	prg_attr.license = "GPL";
 4295
 4296	prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
 4297	if (prog < 0) {
 4298		close(map);
 4299		return 0;
 4300	}
 4301
 4302	ret = bpf_prog_bind_map(prog, map, NULL);
 4303
 4304	close(map);
 4305	close(prog);
 4306
 4307	return ret >= 0;
 4308}
 4309
 4310static int probe_module_btf(void)
 4311{
 4312	static const char strs[] = "\0int";
 4313	__u32 types[] = {
 4314		/* int */
 4315		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
 4316	};
 4317	struct bpf_btf_info info;
 4318	__u32 len = sizeof(info);
 4319	char name[16];
 4320	int fd, err;
 4321
 4322	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
 4323	if (fd < 0)
 4324		return 0; /* BTF not supported at all */
 4325
 4326	memset(&info, 0, sizeof(info));
 4327	info.name = ptr_to_u64(name);
 4328	info.name_len = sizeof(name);
 4329
 4330	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
 4331	 * kernel's module BTF support coincides with support for
 4332	 * name/name_len fields in struct bpf_btf_info.
 4333	 */
 4334	err = bpf_obj_get_info_by_fd(fd, &info, &len);
 4335	close(fd);
 4336	return !err;
 4337}
 4338
 4339enum kern_feature_result {
 4340	FEAT_UNKNOWN = 0,
 4341	FEAT_SUPPORTED = 1,
 4342	FEAT_MISSING = 2,
 4343};
 4344
 4345typedef int (*feature_probe_fn)(void);
 4346
 4347static struct kern_feature_desc {
 4348	const char *desc;
 4349	feature_probe_fn probe;
 4350	enum kern_feature_result res;
 4351} feature_probes[__FEAT_CNT] = {
 4352	[FEAT_PROG_NAME] = {
 4353		"BPF program name", probe_kern_prog_name,
 4354	},
 4355	[FEAT_GLOBAL_DATA] = {
 4356		"global variables", probe_kern_global_data,
 4357	},
 4358	[FEAT_BTF] = {
 4359		"minimal BTF", probe_kern_btf,
 4360	},
 4361	[FEAT_BTF_FUNC] = {
 4362		"BTF functions", probe_kern_btf_func,
 4363	},
 4364	[FEAT_BTF_GLOBAL_FUNC] = {
 4365		"BTF global function", probe_kern_btf_func_global,
 4366	},
 4367	[FEAT_BTF_DATASEC] = {
 4368		"BTF data section and variable", probe_kern_btf_datasec,
 4369	},
 4370	[FEAT_ARRAY_MMAP] = {
 4371		"ARRAY map mmap()", probe_kern_array_mmap,
 4372	},
 4373	[FEAT_EXP_ATTACH_TYPE] = {
 4374		"BPF_PROG_LOAD expected_attach_type attribute",
 4375		probe_kern_exp_attach_type,
 4376	},
 4377	[FEAT_PROBE_READ_KERN] = {
 4378		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
 4379	},
 4380	[FEAT_PROG_BIND_MAP] = {
 4381		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
 4382	},
 4383	[FEAT_MODULE_BTF] = {
 4384		"module BTF support", probe_module_btf,
 4385	},
 4386	[FEAT_BTF_FLOAT] = {
 4387		"BTF_KIND_FLOAT support", probe_kern_btf_float,
 4388	},
 4389};
 4390
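/* Lazily probe for kernel support of a given feature and cache the result
 * in feature_probes[]; READ_ONCE()/WRITE_ONCE() are used so that racing
 * detections from multiple threads at worst probe the same feature twice.
 */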
 4391static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
 4392{
 4393	struct kern_feature_desc *feat = &feature_probes[feat_id];
 4394	int ret;
 4395
 4396	if (obj->gen_loader)
 4397		/* To generate loader program assume the latest kernel
 4398		 * to avoid doing extra prog_load, map_create syscalls.
 4399		 */
 4400		return true;
 4401
 4402	if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
 4403		ret = feat->probe();
 4404		if (ret > 0) {
 4405			WRITE_ONCE(feat->res, FEAT_SUPPORTED);
 4406		} else if (ret == 0) {
 4407			WRITE_ONCE(feat->res, FEAT_MISSING);
 4408		} else {
 4409			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
 4410			WRITE_ONCE(feat->res, FEAT_MISSING);
 4411		}
 4412	}
 4413
 4414	return READ_ONCE(feat->res) == FEAT_SUPPORTED;
 4415}
 4416
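/* Check that a pinned map's parameters (type, key/value size, max_entries,
 * flags), as reported by the kernel, match the map definition from the
 * object file, making the pinned map safe to reuse.
 */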
 4417static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
 4418{
 4419	struct bpf_map_info map_info = {};
 4420	char msg[STRERR_BUFSIZE];
 4421	__u32 map_info_len;
 4422	int err;
 4423
 4424	map_info_len = sizeof(map_info);
 4425
 4426	err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
 4427	if (err && errno == EINVAL)
 4428		err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
 4429	if (err) {
 4430		pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
 4431			libbpf_strerror_r(errno, msg, sizeof(msg)));
 4432		return false;
 4433	}
 4434
 4435	return (map_info.type == map->def.type &&
 4436		map_info.key_size == map->def.key_size &&
 4437		map_info.value_size == map->def.value_size &&
 4438		map_info.max_entries == map->def.max_entries &&
 4439		map_info.map_flags == map->def.map_flags);
 4440}
 4441
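/* If a map has a pin_path set, attempt to open the pinned map and reuse
 * its FD, after verifying compatibility of map definitions; a missing
 * pin (-ENOENT) is not an error, the map will just be created anew.
 */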
 4442static int
 4443bpf_object__reuse_map(struct bpf_map *map)
 4444{
 4445	char *cp, errmsg[STRERR_BUFSIZE];
 4446	int err, pin_fd;
 4447
 4448	pin_fd = bpf_obj_get(map->pin_path);
 4449	if (pin_fd < 0) {
 4450		err = -errno;
 4451		if (err == -ENOENT) {
 4452			pr_debug("found no pinned map to reuse at '%s'\n",
 4453				 map->pin_path);
 4454			return 0;
 4455		}
 4456
 4457		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
 4458		pr_warn("couldn't retrieve pinned map '%s': %s\n",
 4459			map->pin_path, cp);
 4460		return err;
 4461	}
 4462
 4463	if (!map_is_reuse_compat(map, pin_fd)) {
 4464		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
 4465			map->pin_path);
 4466		close(pin_fd);
 4467		return -EINVAL;
 4468	}
 4469
 4470	err = bpf_map__reuse_fd(map, pin_fd);
 4471	if (err) {
 4472		close(pin_fd);
 4473		return err;
 4474	}
 4475	map->pinned = true;
 4476	pr_debug("reused pinned map at '%s'\n", map->pin_path);
 4477
 4478	return 0;
 4479}
 4480
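/* Write the initial contents of an internal map (.data, .rodata, .bss,
 * .kconfig) from its mmaped memory image as the value of element 0, and
 * freeze read-only maps so they can't be modified from the syscall side.
 */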
 4481static int
 4482bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 4483{
 4484	enum libbpf_map_type map_type = map->libbpf_type;
 4485	char *cp, errmsg[STRERR_BUFSIZE];
 4486	int err, zero = 0;
 4487
 4488	if (obj->gen_loader) {
 4489		bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
 4490					 map->mmaped, map->def.value_size);
 4491		if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
 4492			bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
 4493		return 0;
 4494	}
 4495	err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
 4496	if (err) {
 4497		err = -errno;
 4498		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
 4499		pr_warn("Error setting initial map(%s) contents: %s\n",
 4500			map->name, cp);
 4501		return err;
 4502	}
 4503
 4504	/* Freeze .rodata and .kconfig map as read-only from syscall side. */
 4505	if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
 4506		err = bpf_map_freeze(map->fd);
 4507		if (err) {
 4508			err = -errno;
 4509			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
 4510			pr_warn("Error freezing map(%s) as read-only: %s\n",
 4511				map->name, cp);
 4512			return err;
 4513		}
 4514	}
 4515	return 0;
 4516}
 4517
 4518static void bpf_map__destroy(struct bpf_map *map);
 4519
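/* Create one kernel map from its parsed definition: fill create_attr
 * (sizing PERF_EVENT_ARRAY to the number of possible CPUs if max_entries
 * is 0, attaching BTF key/value type IDs when available, and creating the
 * inner map first for map-in-map types), then retry map creation without
 * BTF if the kernel rejected the BTF-augmented attempt.
 */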
 4520static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
 4521{
 4522	struct bpf_create_map_attr create_attr;
 4523	struct bpf_map_def *def = &map->def;
 4524	int err = 0;
 4525
 4526	memset(&create_attr, 0, sizeof(create_attr));
 4527
 4528	if (kernel_supports(obj, FEAT_PROG_NAME))
 4529		create_attr.name = map->name;
 4530	create_attr.map_ifindex = map->map_ifindex;
 4531	create_attr.map_type = def->type;
 4532	create_attr.map_flags = def->map_flags;
 4533	create_attr.key_size = def->key_size;
 4534	create_attr.value_size = def->value_size;
 4535	create_attr.numa_node = map->numa_node;
 4536
 4537	if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
 4538		int nr_cpus;
 4539
 4540		nr_cpus = libbpf_num_possible_cpus();
 4541		if (nr_cpus < 0) {
 4542			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
 4543				map->name, nr_cpus);
 4544			return nr_cpus;
 4545		}
 4546		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
 4547		create_attr.max_entries = nr_cpus;
 4548	} else {
 4549		create_attr.max_entries = def->max_entries;
 4550	}
 4551
 4552	if (bpf_map__is_struct_ops(map))
 4553		create_attr.btf_vmlinux_value_type_id =
 4554			map->btf_vmlinux_value_type_id;
 4555
 4556	create_attr.btf_fd = 0;
 4557	create_attr.btf_key_type_id = 0;
 4558	create_attr.btf_value_type_id = 0;
 4559	if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
 4560		create_attr.btf_fd = btf__fd(obj->btf);
 4561		create_attr.btf_key_type_id = map->btf_key_type_id;
 4562		create_attr.btf_value_type_id = map->btf_value_type_id;
 4563	}
 4564
 4565	if (bpf_map_type__is_map_in_map(def->type)) {
 4566		if (map->inner_map) {
 4567			err = bpf_object__create_map(obj, map->inner_map, true);
 4568			if (err) {
 4569				pr_warn("map '%s': failed to create inner map: %d\n",
 4570					map->name, err);
 4571				return err;
 4572			}
 4573			map->inner_map_fd = bpf_map__fd(map->inner_map);
 4574		}
 4575		if (map->inner_map_fd >= 0)
 4576			create_attr.inner_map_fd = map->inner_map_fd;
 4577	}
 4578
 4579	if (obj->gen_loader) {
 4580		bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
 4581		/* Pretend to have valid FD to pass various fd >= 0 checks.
 4582		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
 4583		 */
 4584		map->fd = 0;
 4585	} else {
 4586		map->fd = bpf_create_map_xattr(&create_attr);
 4587	}
 4588	if (map->fd < 0 && (create_attr.btf_key_type_id ||
 4589			    create_attr.btf_value_type_id)) {
 4590		char *cp, errmsg[STRERR_BUFSIZE];
 4591
 4592		err = -errno;
 4593		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
 4594		pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
 4595			map->name, cp, err);
 4596		create_attr.btf_fd = 0;
 4597		create_attr.btf_key_type_id = 0;
 4598		create_attr.btf_value_type_id = 0;
 4599		map->btf_key_type_id = 0;
 4600		map->btf_value_type_id = 0;
 4601		map->fd = bpf_create_map_xattr(&create_attr);
 4602	}
 4603
 4604	err = map->fd < 0 ? -errno : 0;
 4605
 4606	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
 4607		if (obj->gen_loader)
 4608			map->inner_map->fd = -1;
 4609		bpf_map__destroy(map->inner_map);
 4610		zfree(&map->inner_map);
 4611	}
 4612
 4613	return err;
 4614}
 4615
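/* Populate initialized slots of an outer (map-in-map) map with FDs of
 * target inner maps, then release the no-longer-needed init_slots array.
 */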
 4616static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
 4617{
 4618	const struct bpf_map *targ_map;
 4619	unsigned int i;
 4620	int fd, err = 0;
 4621
 4622	for (i = 0; i < map->init_slots_sz; i++) {
 4623		if (!map->init_slots[i])
 4624			continue;
 4625
 4626		targ_map = map->init_slots[i];
 4627		fd = bpf_map__fd(targ_map);
 4628		if (obj->gen_loader) {
 4629			pr_warn("// TODO map_update_elem: idx %td key %d value==map_idx %td\n",
 4630				map - obj->maps, i, targ_map - obj->maps);
 4631			return -ENOTSUP;
 4632		} else {
 4633			err = bpf_map_update_elem(map->fd, &i, &fd, 0);
 4634		}
 4635		if (err) {
 4636			err = -errno;
 4637			pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
 4638				map->name, i, targ_map->name,
 4639				fd, err);
 4640			return err;
 4641		}
 4642		pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
 4643			 map->name, i, targ_map->name, fd);
 4644	}
 4645
 4646	zfree(&map->init_slots);
 4647	map->init_slots_sz = 0;
 4648
 4649	return 0;
 4650}
 4651
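/* Create (or reuse pinned) kernel maps for all collected maps, populate
 * internal (.data/.rodata/.bss/.kconfig) maps, initialize map-in-map slots,
 * and auto-pin maps that have a pin_path. An -EEXIST race during auto-pin
 * is retried once by attempting to reuse the now-existing pinned map.
 */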
 4652static int
 4653bpf_object__create_maps(struct bpf_object *obj)
 4654{
 4655	struct bpf_map *map;
 4656	char *cp, errmsg[STRERR_BUFSIZE];
 4657	unsigned int i, j;
 4658	int err;
 4659	bool retried;
 4660
 4661	for (i = 0; i < obj->nr_maps; i++) {
 4662		map = &obj->maps[i];
 4663
 4664		retried = false;
 4665retry:
 4666		if (map->pin_path) {
 4667			err = bpf_object__reuse_map(map);
 4668			if (err) {
 4669				pr_warn("map '%s': error reusing pinned map\n",
 4670					map->name);
 4671				goto err_out;
 4672			}
 4673			if (retried && map->fd < 0) {
 4674				pr_warn("map '%s': cannot find pinned map\n",
 4675					map->name);
 4676				err = -ENOENT;
 4677				goto err_out;
 4678			}
 4679		}
 4680
 4681		if (map->fd >= 0) {
 4682			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
 4683				 map->name, map->fd);
 4684		} else {
 4685			err = bpf_object__create_map(obj, map, false);
 4686			if (err)
 4687				goto err_out;
 4688
 4689			pr_debug("map '%s': created successfully, fd=%d\n",
 4690				 map->name, map->fd);
 4691
 4692			if (bpf_map__is_internal(map)) {
 4693				err = bpf_object__populate_internal_map(obj, map);
 4694				if (err < 0) {
 4695					zclose(map->fd);
 4696					goto err_out;
 4697				}
 4698			}
 4699
 4700			if (map->init_slots_sz) {
 4701				err = init_map_slots(obj, map);
 4702				if (err < 0) {
 4703					zclose(map->fd);
 4704					goto err_out;
 4705				}
 4706			}
 4707		}
 4708
 4709		if (map->pin_path && !map->pinned) {
 4710			err = bpf_map__pin(map, NULL);
 4711			if (err) {
 4712				zclose(map->fd);
 4713				if (!retried && err == -EEXIST) {
 4714					retried = true;
 4715					goto retry;
 4716				}
 4717				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
 4718					map->name, map->pin_path, err);
 4719				goto err_out;
 4720			}
 4721		}
 4722	}
 4723
 4724	return 0;
 4725
 4726err_out:
 4727	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
 4728	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
 4729	pr_perm_msg(err);
 4730	for (j = 0; j < i; j++)
 4731		zclose(obj->maps[j].fd);
 4732	return err;
 4733}
 4734
 4735#define BPF_CORE_SPEC_MAX_LEN 64
 4736
 4737/* represents BPF CO-RE field or array element accessor */
 4738struct bpf_core_accessor {
 4739	__u32 type_id;		/* struct/union type or array element type */
 4740	__u32 idx;		/* field index or array index */
 4741	const char *name;	/* field name or NULL for array accessor */
 4742};
 4743
 4744struct bpf_core_spec {
 4745	const struct btf *btf;
 4746	/* high-level spec: named fields and array indices only */
 4747	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
 4748	/* original unresolved (no skip_mods_or_typedefs) root type ID */
 4749	__u32 root_type_id;
 4750	/* CO-RE relocation kind */
 4751	enum bpf_core_relo_kind relo_kind;
 4752	/* high-level spec length */
 4753	int len;
 4754	/* raw, low-level spec: 1-to-1 with accessor spec string */
 4755	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
 4756	/* raw spec length */
 4757	int raw_len;
 4758	/* field bit offset represented by spec */
 4759	__u32 bit_offset;
 4760};
 4761
 4762static bool str_is_empty(const char *s)
 4763{
 4764	return !s || !s[0];
 4765}
 4766
 4767static bool is_flex_arr(const struct btf *btf,
 4768			const struct bpf_core_accessor *acc,
 4769			const struct btf_array *arr)
 4770{
 4771	const struct btf_type *t;
 4772
 4773	/* not a flexible array if it's not a struct member or has non-zero size */
 4774	if (!acc->name || arr->nelems > 0)
 4775		return false;
 4776
 4777	/* has to be the last member of enclosing struct */
 4778	t = btf__type_by_id(btf, acc->type_id);
 4779	return acc->idx == btf_vlen(t) - 1;
 4780}
 4781
 4782static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
 4783{
 4784	switch (kind) {
 4785	case BPF_FIELD_BYTE_OFFSET: return "byte_off";
 4786	case BPF_FIELD_BYTE_SIZE: return "byte_sz";
 4787	case BPF_FIELD_EXISTS: return "field_exists";
 4788	case BPF_FIELD_SIGNED: return "signed";
 4789	case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
 4790	case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
 4791	case BPF_TYPE_ID_LOCAL: return "local_type_id";
 4792	case BPF_TYPE_ID_TARGET: return "target_type_id";
 4793	case BPF_TYPE_EXISTS: return "type_exists";
 4794	case BPF_TYPE_SIZE: return "type_size";
 4795	case BPF_ENUMVAL_EXISTS: return "enumval_exists";
 4796	case BPF_ENUMVAL_VALUE: return "enumval_value";
 4797	default: return "unknown";
 4798	}
 4799}
 4800
 4801static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
 4802{
 4803	switch (kind) {
 4804	case BPF_FIELD_BYTE_OFFSET:
 4805	case BPF_FIELD_BYTE_SIZE:
 4806	case BPF_FIELD_EXISTS:
 4807	case BPF_FIELD_SIGNED:
 4808	case BPF_FIELD_LSHIFT_U64:
 4809	case BPF_FIELD_RSHIFT_U64:
 4810		return true;
 4811	default:
 4812		return false;
 4813	}
 4814}
 4815
 4816static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
 4817{
 4818	switch (kind) {
 4819	case BPF_TYPE_ID_LOCAL:
 4820	case BPF_TYPE_ID_TARGET:
 4821	case BPF_TYPE_EXISTS:
 4822	case BPF_TYPE_SIZE:
 4823		return true;
 4824	default:
 4825		return false;
 4826	}
 4827}
 4828
 4829static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
 4830{
 4831	switch (kind) {
 4832	case BPF_ENUMVAL_EXISTS:
 4833	case BPF_ENUMVAL_VALUE:
 4834		return true;
 4835	default:
 4836		return false;
 4837	}
 4838}
 4839
 4840/*
 4841 * Turn bpf_core_relo into a low- and high-level spec representation,
 4842 * validating correctness along the way, as well as calculating resulting
 4843 * field bit offset, specified by accessor string. Low-level spec captures
 4844 * every single level of nestedness, including traversing anonymous
 4845 * struct/union members. High-level one only captures semantically meaningful
 4846 *    "turning points": named fields and array indices.
 4847 * E.g., for this case:
 4848 *
 4849 *   struct sample {
 4850 *       int __unimportant;
 4851 *       struct {
 4852 *           int __1;
 4853 *           int __2;
 4854 *           int a[7];
 4855 *       };
 4856 *   };
 4857 *
 4858 *   struct sample *s = ...;
 4859 *
 4860 *   int *x = &s->a[3]; // access string = '0:1:2:3'
 4861 *
 4862 * Low-level spec has 1:1 mapping with each element of access string (it's
 4863 * just a parsed access string representation): [0, 1, 2, 3].
 4864 *
 4865 * High-level spec will capture only 3 points:
 4866 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
 4867 *   - field 'a' access (corresponds to '2' in low-level spec);
 4868 *   - array element #3 access (corresponds to '3' in low-level spec).
 4869 *
 4870 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
 4871 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
 4872 * spec and raw_spec are kept empty.
 4873 *
 4874 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
 4875 * string to specify the enumerator's value index that needs to be relocated.
 4876 */
 4877static int bpf_core_parse_spec(const struct btf *btf,
 4878			       __u32 type_id,
 4879			       const char *spec_str,
 4880			       enum bpf_core_relo_kind relo_kind,
 4881			       struct bpf_core_spec *spec)
 4882{
 4883	int access_idx, parsed_len, i;
 4884	struct bpf_core_accessor *acc;
 4885	const struct btf_type *t;
 4886	const char *name;
 4887	__u32 id;
 4888	__s64 sz;
 4889
 4890	if (str_is_empty(spec_str) || *spec_str == ':')
 4891		return -EINVAL;
 4892
 4893	memset(spec, 0, sizeof(*spec));
 4894	spec->btf = btf;
 4895	spec->root_type_id = type_id;
 4896	spec->relo_kind = relo_kind;
 4897
 4898	/* type-based relocations don't have a field access string */
 4899	if (core_relo_is_type_based(relo_kind)) {
 4900		if (strcmp(spec_str, "0"))
 4901			return -EINVAL;
 4902		return 0;
 4903	}
 4904
 4905	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
 4906	while (*spec_str) {
 4907		if (*spec_str == ':')
 4908			++spec_str;
 4909		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
 4910			return -EINVAL;
 4911		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
 4912			return -E2BIG;
 4913		spec_str += parsed_len;
 4914		spec->raw_spec[spec->raw_len++] = access_idx;
 4915	}
 4916
 4917	if (spec->raw_len == 0)
 4918		return -EINVAL;
 4919
 4920	t = skip_mods_and_typedefs(btf, type_id, &id);
 4921	if (!t)
 4922		return -EINVAL;
 4923
 4924	access_idx = spec->raw_spec[0];
 4925	acc = &spec->spec[0];
 4926	acc->type_id = id;
 4927	acc->idx = access_idx;
 4928	spec->len++;
 4929
 4930	if (core_relo_is_enumval_based(relo_kind)) {
 4931		if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
 4932			return -EINVAL;
 4933
 4934		/* record enumerator name in the first accessor */
 4935		acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
 4936		return 0;
 4937	}
 4938
 4939	if (!core_relo_is_field_based(relo_kind))
 4940		return -EINVAL;
 4941
 4942	sz = btf__resolve_size(btf, id);
 4943	if (sz < 0)
 4944		return sz;
 4945	spec->bit_offset = access_idx * sz * 8;
 4946
 4947	for (i = 1; i < spec->raw_len; i++) {
 4948		t = skip_mods_and_typedefs(btf, id, &id);
 4949		if (!t)
 4950			return -EINVAL;
 4951
 4952		access_idx = spec->raw_spec[i];
 4953		acc = &spec->spec[spec->len];
 4954
 4955		if (btf_is_composite(t)) {
 4956			const struct btf_member *m;
 4957			__u32 bit_offset;
 4958
 4959			if (access_idx >= btf_vlen(t))
 4960				return -EINVAL;
 4961
 4962			bit_offset = btf_member_bit_offset(t, access_idx);
 4963			spec->bit_offset += bit_offset;
 4964
 4965			m = btf_members(t) + access_idx;
 4966			if (m->name_off) {
 4967				name = btf__name_by_offset(btf, m->name_off);
 4968				if (str_is_empty(name))
 4969					return -EINVAL;
 4970
 4971				acc->type_id = id;
 4972				acc->idx = access_idx;
 4973				acc->name = name;
 4974				spec->len++;
 4975			}
 4976
 4977			id = m->type;
 4978		} else if (btf_is_array(t)) {
 4979			const struct btf_array *a = btf_array(t);
 4980			bool flex;
 4981
 4982			t = skip_mods_and_typedefs(btf, a->type, &id);
 4983			if (!t)
 4984				return -EINVAL;
 4985
 4986			flex = is_flex_arr(btf, acc - 1, a);
 4987			if (!flex && access_idx >= a->nelems)
 4988				return -EINVAL;
 4989
 4990			spec->spec[spec->len].type_id = id;
 4991			spec->spec[spec->len].idx = access_idx;
 4992			spec->len++;
 4993
 4994			sz = btf__resolve_size(btf, id);
 4995			if (sz < 0)
 4996				return sz;
 4997			spec->bit_offset += access_idx * sz * 8;
 4998		} else {
 4999			pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
 5000				type_id, spec_str, i, id, btf_kind_str(t));
 5001			return -EINVAL;
 5002		}
 5003	}
 5004
 5005	return 0;
 5006}
 5007
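/* Worked example (illustrative, not part of libbpf): parsing the access
 * string "0:1:2:3" from the struct sample case above yields, assuming
 * 4-byte ints and no padding:
 *
 *   raw_spec   = [0, 1, 2, 3], raw_len = 4
 *   spec       = [<deref s[0]>, <field 'a'>, <array elem #3>], len = 3
 *   bit_offset = 0 + 32 (anon struct) + 64 (field 'a') + 3 * 32 = 192
 *
 * i.e., &s->a[3] resolves to byte offset 24 within struct sample.
 */
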
 5008static bool bpf_core_is_flavor_sep(const char *s)
 5009{
 5010	/* check X___Y name pattern, where X and Y are not underscores */
 5011	return s[0] != '_' &&				      /* X */
 5012	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
 5013	       s[4] != '_';				      /* Y */
 5014}
 5015
 5016/* Given 'some_struct_name___with_flavor' return the length of a name prefix
 5017 * before the last triple underscore. The struct name part after the last
 5018 * triple underscore is ignored by BPF CO-RE during relocation matching.
 5019 */
 5020static size_t bpf_core_essential_name_len(const char *name)
 5021{
 5022	size_t n = strlen(name);
 5023	int i;
 5024
 5025	for (i = n - 5; i >= 0; i--) {
 5026		if (bpf_core_is_flavor_sep(name + i))
 5027			return i + 1;
 5028	}
 5029	return n;
 5030}
 5031
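/* Example (illustrative, not part of libbpf; the flavor name is made up):
 *
 *   bpf_core_essential_name_len("task_struct")        == 11
 *   bpf_core_essential_name_len("task_struct___v510") == 11
 *
 * Everything from the last triple underscore onward is dropped, so both
 * names compare equal for the purposes of candidate matching below.
 */
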
 5032struct core_cand
 5033{
 5034	const struct btf *btf;
 5035	const struct btf_type *t;
 5036	const char *name;
 5037	__u32 id;
 5038};
 5039
 5040/* dynamically sized list of type IDs and its associated struct btf */
 5041struct core_cand_list {
 5042	struct core_cand *cands;
 5043	int len;
 5044};
 5045
 5046static void bpf_core_free_cands(struct core_cand_list *cands)
 5047{
 5048	free(cands->cands);
 5049	free(cands);
 5050}
 5051
 5052static int bpf_core_add_cands(struct core_cand *local_cand,
 5053			      size_t local_essent_len,
 5054			      const struct btf *targ_btf,
 5055			      const char *targ_btf_name,
 5056			      int targ_start_id,
 5057			      struct core_cand_list *cands)
 5058{
 5059	struct core_cand *new_cands, *cand;
 5060	const struct btf_type *t;
 5061	const char *targ_name;
 5062	size_t targ_essent_len;
 5063	int n, i;
 5064
 5065	n = btf__get_nr_types(targ_btf);
 5066	for (i = targ_start_id; i <= n; i++) {
 5067		t = btf__type_by_id(targ_btf, i);
 5068		if (btf_kind(t) != btf_kind(local_cand->t))
 5069			continue;
 5070
 5071		targ_name = btf__name_by_offset(targ_btf, t->name_off);
 5072		if (str_is_empty(targ_name))
 5073			continue;
 5074
 5075		targ_essent_len = bpf_core_essential_name_len(targ_name);
 5076		if (targ_essent_len != local_essent_len)
 5077			continue;
 5078
 5079		if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
 5080			continue;
 5081
 5082		pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
 5083			 local_cand->id, btf_kind_str(local_cand->t),
 5084			 local_cand->name, i, btf_kind_str(t), targ_name,
 5085			 targ_btf_name);
 5086		new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
 5087					      sizeof(*cands->cands));
 5088		if (!new_cands)
 5089			return -ENOMEM;
 5090
 5091		cand = &new_cands[cands->len];
 5092		cand->btf = targ_btf;
 5093		cand->t = t;
 5094		cand->name = targ_name;
 5095		cand->id = i;
 5096
 5097		cands->cands = new_cands;
 5098		cands->len++;
 5099	}
 5100	return 0;
 5101}
 5102
 5103static int load_module_btfs(struct bpf_object *obj)
 5104{
 5105	struct bpf_btf_info info;
 5106	struct module_btf *mod_btf;
 5107	struct btf *btf;
 5108	char name[64];
 5109	__u32 id = 0, len;
 5110	int err, fd;
 5111
 5112	if (obj->btf_modules_loaded)
 5113		return 0;
 5114
 5115	if (obj->gen_loader)
 5116		return 0;
 5117
 5118	/* don't do this again, even if we find no module BTFs */
 5119	obj->btf_modules_loaded = true;
 5120
 5121	/* kernel too old to support module BTFs */
 5122	if (!kernel_supports(obj, FEAT_MODULE_BTF))
 5123		return 0;
 5124
 5125	while (true) {
 5126		err = bpf_btf_get_next_id(id, &id);
 5127		if (err && errno == ENOENT)
 5128			return 0;
 5129		if (err) {
 5130			err = -errno;
 5131			pr_warn("failed to iterate BTF objects: %d\n", err);
 5132			return err;
 5133		}
 5134
 5135		fd = bpf_btf_get_fd_by_id(id);
 5136		if (fd < 0) {
 5137			if (errno == ENOENT)
 5138				continue; /* expected race: BTF was unloaded */
 5139			err = -errno;
 5140			pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
 5141			return err;
 5142		}
 5143
 5144		len = sizeof(info);
 5145		memset(&info, 0, sizeof(info));
 5146		info.name = ptr_to_u64(name);
 5147		info.name_len = sizeof(name);
 5148
 5149		err = bpf_obj_get_info_by_fd(fd, &info, &len);
 5150		if (err) {
 5151			err = -errno;
 5152			pr_warn("failed to get BTF object #%d info: %d\n", id, err);
 5153			goto err_out;
 5154		}
 5155
 5156		/* ignore non-module BTFs */
 5157		if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
 5158			close(fd);
 5159			continue;
 5160		}
 5161
 5162		btf = btf_get_from_fd(fd, obj->btf_vmlinux);
 5163		err = libbpf_get_error(btf);
 5164		if (err) {
 5165			pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
 5166				name, id, err);
 5167			goto err_out;
 5168		}
 5169
 5170		err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
 5171				        sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
 5172		if (err)
 5173			goto err_out;
 5174
 5175		mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
 5176
 5177		mod_btf->btf = btf;
 5178		mod_btf->id = id;
 5179		mod_btf->fd = fd;
 5180		mod_btf->name = strdup(name);
 5181		if (!mod_btf->name) {
 5182			err = -ENOMEM;
 5183			goto err_out;
 5184		}
 5185		continue;
 5186
 5187err_out:
 5188		close(fd);
 5189		return err;
 5190	}
 5191
 5192	return 0;
 5193}
 5194
 5195static struct core_cand_list *
 5196bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
 5197{
 5198	struct core_cand local_cand = {};
 5199	struct core_cand_list *cands;
 5200	const struct btf *main_btf;
 5201	size_t local_essent_len;
 5202	int err, i;
 5203
 5204	local_cand.btf = local_btf;
 5205	local_cand.t = btf__type_by_id(local_btf, local_type_id);
 5206	if (!local_cand.t)
 5207		return ERR_PTR(-EINVAL);
 5208
 5209	local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
 5210	if (str_is_empty(local_cand.name))
 5211		return ERR_PTR(-EINVAL);
 5212	local_essent_len = bpf_core_essential_name_len(local_cand.name);
 5213
 5214	cands = calloc(1, sizeof(*cands));
 5215	if (!cands)
 5216		return ERR_PTR(-ENOMEM);
 5217
 5218	/* Attempt to find target candidates in vmlinux BTF first */
 5219	main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
 5220	err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
 5221	if (err)
 5222		goto err_out;
 5223
 5224	/* if vmlinux BTF has any candidate, don't go for module BTFs */
 5225	if (cands->len)
 5226		return cands;
 5227
 5228	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
 5229	if (obj->btf_vmlinux_override)
 5230		return cands;
 5231
 5232	/* now look through module BTFs, still trying to find candidates */
 5233	err = load_module_btfs(obj);
 5234	if (err)
 5235		goto err_out;
 5236
 5237	for (i = 0; i < obj->btf_module_cnt; i++) {
 5238		err = bpf_core_add_cands(&local_cand, local_essent_len,
 5239					 obj->btf_modules[i].btf,
 5240					 obj->btf_modules[i].name,
 5241					 btf__get_nr_types(obj->btf_vmlinux) + 1,
 5242					 cands);
 5243		if (err)
 5244			goto err_out;
 5245	}
 5246
 5247	return cands;
 5248err_out:
 5249	bpf_core_free_cands(cands);
 5250	return ERR_PTR(err);
 5251}
 5252
 5253/* Check two types for compatibility for the purpose of field access
 5254 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 5255 * are relocating semantically compatible entities:
 5256 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 5257 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 5258 *   - any two PTRs are always compatible;
 5259 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 5260 *     least one of enums should be anonymous;
 5261 *   - for ENUMs, check sizes, names are ignored;
 5262 *   - for INT, size and signedness are ignored;
 5263 *   - any two FLOATs are always compatible;
 5264 *   - for ARRAY, dimensionality is ignored, element types are checked for
 5265 *     compatibility recursively;
 5266 *   - everything else shouldn't be ever a target of relocation.
 5267 * These rules are not set in stone and probably will be adjusted as we get
 5268 * more experience with using BPF CO-RE relocations.
 5269 */
 5270static int bpf_core_fields_are_compat(const struct btf *local_btf,
 5271				      __u32 local_id,
 5272				      const struct btf *targ_btf,
 5273				      __u32 targ_id)
 5274{
 5275	const struct btf_type *local_type, *targ_type;
 5276
 5277recur:
 5278	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
 5279	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
 5280	if (!local_type || !targ_type)
 5281		return -EINVAL;
 5282
 5283	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
 5284		return 1;
 5285	if (btf_kind(local_type) != btf_kind(targ_type))
 5286		return 0;
 5287
 5288	switch (btf_kind(local_type)) {
 5289	case BTF_KIND_PTR:
 5290	case BTF_KIND_FLOAT:
 5291		return 1;
 5292	case BTF_KIND_FWD:
 5293	case BTF_KIND_ENUM: {
 5294		const char *local_name, *targ_name;
 5295		size_t local_len, targ_len;
 5296
 5297		local_name = btf__name_by_offset(local_btf,
 5298						 local_type->name_off);
 5299		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
 5300		local_len = bpf_core_essential_name_len(local_name);
 5301		targ_len = bpf_core_essential_name_len(targ_name);
 5302		/* one of them is anonymous or both w/ same flavor-less names */
 5303		return local_len == 0 || targ_len == 0 ||
 5304		       (local_len == targ_len &&
 5305			strncmp(local_name, targ_name, local_len) == 0);
 5306	}
 5307	case BTF_KIND_INT:
 5308		/* just reject deprecated bitfield-like integers; all other
 5309		 * integers are by default compatible between each other
 5310		 */
 5311		return btf_int_offset(local_type) == 0 &&
 5312		       btf_int_offset(targ_type) == 0;
 5313	case BTF_KIND_ARRAY:
 5314		local_id = btf_array(local_type)->type;
 5315		targ_id = btf_array(targ_type)->type;
 5316		goto recur;
 5317	default:
 5318		pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
 5319			btf_kind(local_type), local_id, targ_id);
 5320		return 0;
 5321	}
 5322}
 5323
 5324/*
 5325 * Given single high-level named field accessor in local type, find
 5326 * corresponding high-level accessor for a target type. Along the way,
 5327 * maintain low-level spec for target as well. Also keep updating target
 5328 * bit offset.
 5329 *
 5330 * Searching is performed through recursive exhaustive enumeration of all
 5331 * fields of a struct/union. If there are any anonymous (embedded)
 5332 * structs/unions, they are recursively searched as well. If field with
 5333 * desired name is found, check compatibility between local and target types,
 5334 * before returning result.
 5335 *
 5336 * 1 is returned, if field is found.
 5337 * 0 is returned if no compatible field is found.
 5338 * <0 is returned on error.
 5339 */
 5340static int bpf_core_match_member(const struct btf *local_btf,
 5341				 const struct bpf_core_accessor *local_acc,
 5342				 const struct btf *targ_btf,
 5343				 __u32 targ_id,
 5344				 struct bpf_core_spec *spec,
 5345				 __u32 *next_targ_id)
 5346{
 5347	const struct btf_type *local_type, *targ_type;
 5348	const struct btf_member *local_member, *m;
 5349	const char *local_name, *targ_name;
 5350	__u32 local_id;
 5351	int i, n, found;
 5352
 5353	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
 5354	if (!targ_type)
 5355		return -EINVAL;
 5356	if (!btf_is_composite(targ_type))
 5357		return 0;
 5358
 5359	local_id = local_acc->type_id;
 5360	local_type = btf__type_by_id(local_btf, local_id);
 5361	local_member = btf_members(local_type) + local_acc->idx;
 5362	local_name = btf__name_by_offset(local_btf, local_member->name_off);
 5363
 5364	n = btf_vlen(targ_type);
 5365	m = btf_members(targ_type);
 5366	for (i = 0; i < n; i++, m++) {
 5367		__u32 bit_offset;
 5368
 5369		bit_offset = btf_member_bit_offset(targ_type, i);
 5370
 5371		/* too deep struct/union/array nesting */
 5372		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
 5373			return -E2BIG;
 5374
 5375		/* speculatively assume this member is the one we're looking for */
 5376		spec->bit_offset += bit_offset;
 5377		spec->raw_spec[spec->raw_len++] = i;
 5378
 5379		targ_name = btf__name_by_offset(targ_btf, m->name_off);
 5380		if (str_is_empty(targ_name)) {
 5381			/* embedded struct/union, we need to go deeper */
 5382			found = bpf_core_match_member(local_btf, local_acc,
 5383						      targ_btf, m->type,
 5384						      spec, next_targ_id);
 5385			if (found) /* either found or error */
 5386				return found;
 5387		} else if (strcmp(local_name, targ_name) == 0) {
 5388			/* matching named field */
 5389			struct bpf_core_accessor *targ_acc;
 5390
 5391			targ_acc = &spec->spec[spec->len++];
 5392			targ_acc->type_id = targ_id;
 5393			targ_acc->idx = i;
 5394			targ_acc->name = targ_name;
 5395
 5396			*next_targ_id = m->type;
 5397			found = bpf_core_fields_are_compat(local_btf,
 5398							   local_member->type,
 5399							   targ_btf, m->type);
 5400			if (!found)
 5401				spec->len--; /* pop accessor */
 5402			return found;
 5403		}
 5404		/* member turned out not to be what we were looking for */
 5405		spec->bit_offset -= bit_offset;
 5406		spec->raw_len--;
 5407	}
 5408
 5409	return 0;
 5410}
 5411
 5412/* Check local and target types for compatibility. This check is used for
 5413 * type-based CO-RE relocations and follows slightly different rules than
 5414 * field-based relocations. This function assumes that root types were already
 5415 * checked for name match. Beyond that initial root-level name check, names
 5416 * are completely ignored. Compatibility rules are as follows:
 5417 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 5418 *     kind should match for local and target types (i.e., STRUCT is not
 5419 *     compatible with UNION);
 5420 *   - for ENUMs, the size is ignored;
 5421 *   - for INT, size and signedness are ignored;
 5422 *   - for ARRAY, dimensionality is ignored, element types are checked for
 5423 *     compatibility recursively;
 5424 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
 5425 *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 5426 *   - FUNC_PROTOs are compatible if they have compatible signature: same
 5427 *     number of input args and compatible return and argument types.
 5428 * These rules are not set in stone and probably will be adjusted as we get
 5429 * more experience with using BPF CO-RE relocations.
 5430 */
 5431static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
 5432				     const struct btf *targ_btf, __u32 targ_id)
 5433{
 5434	const struct btf_type *local_type, *targ_type;
 5435	int depth = 32; /* max recursion depth */
 5436
 5437	/* caller made sure that names match (ignoring flavor suffix) */
 5438	local_type = btf__type_by_id(local_btf, local_id);
 5439	targ_type = btf__type_by_id(targ_btf, targ_id);
 5440	if (btf_kind(local_type) != btf_kind(targ_type))
 5441		return 0;
 5442
 5443recur:
 5444	depth--;
 5445	if (depth < 0)
 5446		return -EINVAL;
 5447
 5448	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
 5449	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
 5450	if (!local_type || !targ_type)
 5451		return -EINVAL;
 5452
 5453	if (btf_kind(local_type) != btf_kind(targ_type))
 5454		return 0;
 5455
 5456	switch (btf_kind(local_type)) {
 5457	case BTF_KIND_UNKN:
 5458	case BTF_KIND_STRUCT:
 5459	case BTF_KIND_UNION:
 5460	case BTF_KIND_ENUM:
 5461	case BTF_KIND_FWD:
 5462		return 1;
 5463	case BTF_KIND_INT:
 5464		/* just reject deprecated bitfield-like integers; all other
 5465		 * integers are by default compatible between each other
 5466		 */
 5467		return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
 5468	case BTF_KIND_PTR:
 5469		local_id = local_type->type;
 5470		targ_id = targ_type->type;
 5471		goto recur;
 5472	case BTF_KIND_ARRAY:
 5473		local_id = btf_array(local_type)->type;
 5474		targ_id = btf_array(targ_type)->type;
 5475		goto recur;
 5476	case BTF_KIND_FUNC_PROTO: {
 5477		struct btf_param *local_p = btf_params(local_type);
 5478		struct btf_param *targ_p = btf_params(targ_type);
 5479		__u16 local_vlen = btf_vlen(local_type);
 5480		__u16 targ_vlen = btf_vlen(targ_type);
 5481		int i, err;
 5482
 5483		if (local_vlen != targ_vlen)
 5484			return 0;
 5485
 5486		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
 5487			skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
 5488			skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
 5489			err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
 5490			if (err <= 0)
 5491				return err;
 5492		}
 5493
 5494		/* tail recurse for return type check */
 5495		skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
 5496		skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
 5497		goto recur;
 5498	}
 5499	default:
 5500		pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
 5501			btf_kind_str(local_type), local_id, targ_id);
 5502		return 0;
 5503	}
 5504}
 5505
 5506/*
 5507 * Try to match local spec to a target type and, if successful, produce full
 5508 * target spec (high-level, low-level + bit offset).
 5509 */
 5510static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
 5511			       const struct btf *targ_btf, __u32 targ_id,
 5512			       struct bpf_core_spec *targ_spec)
 5513{
 5514	const struct btf_type *targ_type;
 5515	const struct bpf_core_accessor *local_acc;
 5516	struct bpf_core_accessor *targ_acc;
 5517	int i, sz, matched;
 5518
 5519	memset(targ_spec, 0, sizeof(*targ_spec));
 5520	targ_spec->btf = targ_btf;
 5521	targ_spec->root_type_id = targ_id;
 5522	targ_spec->relo_kind = local_spec->relo_kind;
 5523
 5524	if (core_relo_is_type_based(local_spec->relo_kind)) {
 5525		return bpf_core_types_are_compat(local_spec->btf,
 5526						 local_spec->root_type_id,
 5527						 targ_btf, targ_id);
 5528	}
 5529
 5530	local_acc = &local_spec->spec[0];
 5531	targ_acc = &targ_spec->spec[0];
 5532
 5533	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
 5534		size_t local_essent_len, targ_essent_len;
 5535		const struct btf_enum *e;
 5536		const char *targ_name;
 5537
 5538		/* has to resolve to an enum */
 5539		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
 5540		if (!btf_is_enum(targ_type))
 5541			return 0;
 5542
 5543		local_essent_len = bpf_core_essential_name_len(local_acc->name);
 5544
 5545		for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
 5546			targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
 5547			targ_essent_len = bpf_core_essential_name_len(targ_name);
 5548			if (targ_essent_len != local_essent_len)
 5549				continue;
 5550			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
 5551				targ_acc->type_id = targ_id;
 5552				targ_acc->idx = i;
 5553				targ_acc->name = targ_name;
 5554				targ_spec->len++;
 5555				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
 5556				targ_spec->raw_len++;
 5557				return 1;
 5558			}
 5559		}
 5560		return 0;
 5561	}
 5562
 5563	if (!core_relo_is_field_based(local_spec->relo_kind))
 5564		return -EINVAL;
 5565
 5566	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
 5567		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
 5568						   &targ_id);
 5569		if (!targ_type)
 5570			return -EINVAL;
 5571
 5572		if (local_acc->name) {
 5573			matched = bpf_core_match_member(local_spec->btf,
 5574							local_acc,
 5575							targ_btf, targ_id,
 5576							targ_spec, &targ_id);
 5577			if (matched <= 0)
 5578				return matched;
 5579		} else {
 5580			/* for i=0, targ_id is already treated as array element
 5581			 * type (because it's the original struct), for others
 5582			 * we should find array element type first
 5583			 */
 5584			if (i > 0) {
 5585				const struct btf_array *a;
 5586				bool flex;
 5587
 5588				if (!btf_is_array(targ_type))
 5589					return 0;
 5590
 5591				a = btf_array(targ_type);
 5592				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
 5593				if (!flex && local_acc->idx >= a->nelems)
 5594					return 0;
 5595				if (!skip_mods_and_typedefs(targ_btf, a->type,
 5596							    &targ_id))
 5597					return -EINVAL;
 5598			}
 5599
 5600			/* too deep struct/union/array nesting */
 5601			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
 5602				return -E2BIG;
 5603
 5604			targ_acc->type_id = targ_id;
 5605			targ_acc->idx = local_acc->idx;
 5606			targ_acc->name = NULL;
 5607			targ_spec->len++;
 5608			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
 5609			targ_spec->raw_len++;
 5610
 5611			sz = btf__resolve_size(targ_btf, targ_id);
 5612			if (sz < 0)
 5613				return sz;
 5614			targ_spec->bit_offset += local_acc->idx * sz * 8;
 5615		}
 5616	}
 5617
 5618	return 1;
 5619}
 5620
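/* Illustrative example (not part of libbpf): field order and extra members
 * in the target type don't matter; only names and type compatibility do.
 * A local spec for field 'b' in
 *
 *   struct sample { int a; int b; };             // local: b @ bit offset 32
 *
 * still matches the target
 *
 *   struct sample { int b; long pad; int a; };   // target: b @ bit offset 0
 *
 * and the resulting targ_spec->bit_offset (0) is what later gets patched
 * into the instruction in place of the locally computed offset (32).
 */
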
 5621static int bpf_core_calc_field_relo(const struct bpf_program *prog,
 5622				    const struct bpf_core_relo *relo,
 5623				    const struct bpf_core_spec *spec,
 5624				    __u32 *val, __u32 *field_sz, __u32 *type_id,
 5625				    bool *validate)
 5626{
 5627	const struct bpf_core_accessor *acc;
 5628	const struct btf_type *t;
 5629	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
 5630	const struct btf_member *m;
 5631	const struct btf_type *mt;
 5632	bool bitfield;
 5633	__s64 sz;
 5634
 5635	*field_sz = 0;
 5636
 5637	if (relo->kind == BPF_FIELD_EXISTS) {
 5638		*val = spec ? 1 : 0;
 5639		return 0;
 5640	}
 5641
 5642	if (!spec)
 5643		return -EUCLEAN; /* request instruction poisoning */
 5644
 5645	acc = &spec->spec[spec->len - 1];
 5646	t = btf__type_by_id(spec->btf, acc->type_id);
 5647
 5648	/* a[n] accessor needs special handling */
 5649	if (!acc->name) {
 5650		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
 5651			*val = spec->bit_offset / 8;
 5652			/* remember field size for load/store mem size */
 5653			sz = btf__resolve_size(spec->btf, acc->type_id);
 5654			if (sz < 0)
 5655				return -EINVAL;
 5656			*field_sz = sz;
 5657			*type_id = acc->type_id;
 5658		} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
 5659			sz = btf__resolve_size(spec->btf, acc->type_id);
 5660			if (sz < 0)
 5661				return -EINVAL;
 5662			*val = sz;
 5663		} else {
 5664			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
 5665				prog->name, relo->kind, relo->insn_off / 8);
 5666			return -EINVAL;
 5667		}
 5668		if (validate)
 5669			*validate = true;
 5670		return 0;
 5671	}
 5672
 5673	m = btf_members(t) + acc->idx;
 5674	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
 5675	bit_off = spec->bit_offset;
 5676	bit_sz = btf_member_bitfield_size(t, acc->idx);
 5677
 5678	bitfield = bit_sz > 0;
 5679	if (bitfield) {
 5680		byte_sz = mt->size;
 5681		byte_off = bit_off / 8 / byte_sz * byte_sz;
 5682		/* figure out smallest int size necessary for bitfield load */
 5683		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
 5684			if (byte_sz >= 8) {
 5685				/* bitfield can't be read with 64-bit read */
 5686				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
 5687					prog->name, relo->kind, relo->insn_off / 8);
 5688				return -E2BIG;
 5689			}
 5690			byte_sz *= 2;
 5691			byte_off = bit_off / 8 / byte_sz * byte_sz;
 5692		}
 5693	} else {
 5694		sz = btf__resolve_size(spec->btf, field_type_id);
 5695		if (sz < 0)
 5696			return -EINVAL;
 5697		byte_sz = sz;
 5698		byte_off = spec->bit_offset / 8;
 5699		bit_sz = byte_sz * 8;
 5700	}
 5701
 5702	/* for bitfields, all the relocatable aspects are ambiguous and we
 5703	 * might disagree with the compiler, so turn off validation of the expected
 5704	 * value, except for signedness
 5705	 */
 5706	if (validate)
 5707		*validate = !bitfield;
 5708
 5709	switch (relo->kind) {
 5710	case BPF_FIELD_BYTE_OFFSET:
 5711		*val = byte_off;
 5712		if (!bitfield) {
 5713			*field_sz = byte_sz;
 5714			*type_id = field_type_id;
 5715		}
 5716		break;
 5717	case BPF_FIELD_BYTE_SIZE:
 5718		*val = byte_sz;
 5719		break;
 5720	case BPF_FIELD_SIGNED:
 5721		/* enums will be assumed unsigned */
 5722		*val = btf_is_enum(mt) ||
 5723		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
 5724		if (validate)
 5725			*validate = true; /* signedness is never ambiguous */
 5726		break;
 5727	case BPF_FIELD_LSHIFT_U64:
 5728#if __BYTE_ORDER == __LITTLE_ENDIAN
 5729		*val = 64 - (bit_off + bit_sz - byte_off * 8);
 5730#else
 5731		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
 5732#endif
 5733		break;
 5734	case BPF_FIELD_RSHIFT_U64:
 5735		*val = 64 - bit_sz;
 5736		if (validate)
 5737			*validate = true; /* right shift is never ambiguous */
 5738		break;
 5739	case BPF_FIELD_EXISTS:
 5740	default:
 5741		return -EOPNOTSUPP;
 5742	}
 5743
 5744	return 0;
 5745}
 5746
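/* Worked bitfield example (illustrative, not part of libbpf): given
 *
 *   struct s { unsigned int pad : 20; unsigned int f : 10; };
 *
 * field 'f' has bit_off = 20, bit_sz = 10 and underlying byte_sz = 4, so
 * byte_off = 20 / 8 / 4 * 4 = 0 and bits 20..29 fit into a 4-byte load
 * (20 + 10 - 0 <= 32). On a little-endian host the computed values are:
 *
 *   BPF_FIELD_BYTE_OFFSET -> 0
 *   BPF_FIELD_BYTE_SIZE   -> 4
 *   BPF_FIELD_LSHIFT_U64  -> 64 - (20 + 10 - 0) = 34
 *   BPF_FIELD_RSHIFT_U64  -> 64 - 10 = 54
 *
 * i.e., load 4 bytes, shift left by 34, then right by 54 to extract 'f'.
 */
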
 5747static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
 5748				   const struct bpf_core_spec *spec,
 5749				   __u32 *val)
 5750{
 5751	__s64 sz;
 5752
 5753	/* type-based relos return zero when target type is not found */
 5754	if (!spec) {
 5755		*val = 0;
 5756		return 0;
 5757	}
 5758
 5759	switch (relo->kind) {
 5760	case BPF_TYPE_ID_TARGET:
 5761		*val = spec->root_type_id;
 5762		break;
 5763	case BPF_TYPE_EXISTS:
 5764		*val = 1;
 5765		break;
 5766	case BPF_TYPE_SIZE:
 5767		sz = btf__resolve_size(spec->btf, spec->root_type_id);
 5768		if (sz < 0)
 5769			return -EINVAL;
 5770		*val = sz;
 5771		break;
 5772	case BPF_TYPE_ID_LOCAL:
 5773	/* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
 5774	default:
 5775		return -EOPNOTSUPP;
 5776	}
 5777
 5778	return 0;
 5779}
 5780
 5781static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
 5782				      const struct bpf_core_spec *spec,
 5783				      __u32 *val)
 5784{
 5785	const struct btf_type *t;
 5786	const struct btf_enum *e;
 5787
 5788	switch (relo->kind) {
 5789	case BPF_ENUMVAL_EXISTS:
 5790		*val = spec ? 1 : 0;
 5791		break;
 5792	case BPF_ENUMVAL_VALUE:
 5793		if (!spec)
 5794			return -EUCLEAN; /* request instruction poisoning */
 5795		t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
 5796		e = btf_enum(t) + spec->spec[0].idx;
 5797		*val = e->val;
 5798		break;
 5799	default:
 5800		return -EOPNOTSUPP;
 5801	}
 5802
 5803	return 0;
 5804}
 5805
 5806struct bpf_core_relo_res
 5807{
 5808	/* expected value in the instruction, unless validate == false */
 5809	__u32 orig_val;
 5810	/* new value that needs to be patched up to */
 5811	__u32 new_val;
 5812	/* relocation unsuccessful, poison instruction, but don't fail load */
 5813	bool poison;
 5814	/* some relocations can't be validated against orig_val */
 5815	bool validate;
 5816	/* for field byte offset relocations or the forms:
 5817	 *     *(T *)(rX + <off>) = rY
 5818	 *     rX = *(T *)(rY + <off>),
 5819	 * we remember original and resolved field size to adjust direct
 5820	 * memory loads of pointers and integers; this is necessary for 32-bit
 5821	 * host kernel architectures, but also allows automatic relocation of
 5822	 * fields that were resized from, e.g., u32 to u64, etc.
 5823	 */
 5824	bool fail_memsz_adjust;
 5825	__u32 orig_sz;
 5826	__u32 orig_type_id;
 5827	__u32 new_sz;
 5828	__u32 new_type_id;
 5829};
 5830
 5831/* Calculate original and target relocation values, given local and target
 5832 * specs and relocation kind. These values are calculated for each candidate.
 5833 * If there are multiple candidates, resulting values should all be consistent
 5834 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
 5835 * If the instruction has to be poisoned, res->poison will be set to true.
 5836 */
 5837static int bpf_core_calc_relo(const struct bpf_program *prog,
 5838			      const struct bpf_core_relo *relo,
 5839			      int relo_idx,
 5840			      const struct bpf_core_spec *local_spec,
 5841			      const struct bpf_core_spec *targ_spec,
 5842			      struct bpf_core_relo_res *res)
 5843{
 5844	int err = -EOPNOTSUPP;
 5845
 5846	res->orig_val = 0;
 5847	res->new_val = 0;
 5848	res->poison = false;
 5849	res->validate = true;
 5850	res->fail_memsz_adjust = false;
 5851	res->orig_sz = res->new_sz = 0;
 5852	res->orig_type_id = res->new_type_id = 0;
 5853
 5854	if (core_relo_is_field_based(relo->kind)) {
 5855		err = bpf_core_calc_field_relo(prog, relo, local_spec,
 5856					       &res->orig_val, &res->orig_sz,
 5857					       &res->orig_type_id, &res->validate);
 5858		err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
 5859						      &res->new_val, &res->new_sz,
 5860						      &res->new_type_id, NULL);
 5861		if (err)
 5862			goto done;
 5863		/* Validate if it's safe to adjust load/store memory size.
 5864		 * Adjustments are performed only if original and new memory
 5865		 * sizes differ.
 5866		 */
 5867		res->fail_memsz_adjust = false;
 5868		if (res->orig_sz != res->new_sz) {
 5869			const struct btf_type *orig_t, *new_t;
 5870
 5871			orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
 5872			new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
 5873
 5874			/* There are two use cases in which it's safe to
 5875			 * adjust load/store's mem size:
 5876			 *   - reading a 32-bit kernel pointer, while on the BPF
 5877			 *   side pointers are always 64-bit; in this case
 5878			 *   it's safe to "downsize" the instruction size because
 5879			 *   the pointer is treated as an unsigned integer with
 5880			 *   zero-extended upper 32 bits;
 5881			 *   - reading unsigned integers, again because
 5882			 *   zero-extension preserves the value correctly.
 5883			 *
 5884			 * In all other cases it's incorrect to attempt to
 5885			 * load/store the field because the read value would be
 5886			 * incorrect, so we poison the relocated instruction.
 5887			 */
 5888			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
 5889				goto done;
 5890			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
 5891			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
 5892			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
 5893				goto done;
 5894
 5895			/* mark as invalid mem size adjustment, but this will
 5896			 * only be checked for LDX/STX/ST insns
 5897			 */
 5898			res->fail_memsz_adjust = true;
 5899		}
 5900	} else if (core_relo_is_type_based(relo->kind)) {
 5901		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
 5902		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
 5903	} else if (core_relo_is_enumval_based(relo->kind)) {
 5904		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
 5905		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
 5906	}
 5907
 5908done:
 5909	if (err == -EUCLEAN) {
 5910		/* EUCLEAN is used to signal instruction poisoning request */
 5911		res->poison = true;
 5912		err = 0;
 5913	} else if (err == -EOPNOTSUPP) {
 5914		/* EOPNOTSUPP means unknown/unsupported relocation */
 5915		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
 5916			prog->name, relo_idx, core_relo_kind_str(relo->kind),
 5917			relo->kind, relo->insn_off / 8);
 5918	}
 5919
 5920	return err;
 5921}
 5922
 5923/*
 5924 * Turn an instruction for which CO-RE relocation failed into an invalid
 5925 * one with a distinct signature.
 5926 */
 5927static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
 5928				 int insn_idx, struct bpf_insn *insn)
 5929{
 5930	pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
 5931		 prog->name, relo_idx, insn_idx);
 5932	insn->code = BPF_JMP | BPF_CALL;
 5933	insn->dst_reg = 0;
 5934	insn->src_reg = 0;
 5935	insn->off = 0;
 5936	/* if this instruction is reachable (not dead code),
 5937	 * verifier will complain with the following message:
 5938	 * invalid func unknown#195896080
 5939	 */
 5940	insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
 5941}
 5942
 5943static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
 5944{
 5945	switch (BPF_SIZE(insn->code)) {
 5946	case BPF_DW: return 8;
 5947	case BPF_W: return 4;
 5948	case BPF_H: return 2;
 5949	case BPF_B: return 1;
 5950	default: return -1;
 5951	}
 5952}
 5953
 5954static int insn_bytes_to_bpf_size(__u32 sz)
 5955{
 5956	switch (sz) {
 5957	case 8: return BPF_DW;
 5958	case 4: return BPF_W;
 5959	case 2: return BPF_H;
 5960	case 1: return BPF_B;
 5961	default: return -1;
 5962	}
 5963}
 5964
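/* Illustrative sketch (not part of libbpf): the two helpers above are used
 * to adjust a load/store access size when a field was resized, e.g. from
 * u32 in local BTF to u64 in the running kernel. For a BPF_LDX | BPF_MEM |
 * BPF_W instruction with res->new_sz == 8:
 *
 *   insn_bpf_size_to_bytes(insn) == 4;        // current size is BPF_W
 *   insn_bytes_to_bpf_size(8)    == BPF_DW;
 *   insn->code = BPF_MODE(insn->code) | BPF_DW | BPF_CLASS(insn->code);
 *
 * turning "rX = *(u32 *)(rY + off)" into "rX = *(u64 *)(rY + off)", which
 * is only safe for pointers and unsigned integers (see fail_memsz_adjust
 * in bpf_core_calc_relo() above).
 */
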
 5965/*
 5966 * Patch relocatable BPF instruction.
 5967 *
 5968 * Patched value is determined by relocation kind and target specification.
 5969 * For existence relocations, the target spec will be NULL if the field/type is not found.
 5970 * The expected insn->imm value is determined using the relocation kind and local
 5971 * spec, and is checked before patching the instruction. If the actual insn->imm
 5972 * value is wrong, bail out with an error.
 5973 *
 5974 * Currently supported classes of BPF instruction are:
 5975 * 1. rX = <imm> (assignment with immediate operand);
 5976 * 2. rX += <imm> (arithmetic operations with immediate operand);
 5977 * 3. rX = <imm64> (load with 64-bit immediate value);
 5978 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
 5979 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
 5980 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
 5981 */
 5982static int bpf_core_patch_insn(struct bpf_program *prog,
 5983			       const struct bpf_core_relo *relo,
 5984			       int relo_idx,
 5985			       const struct bpf_core_relo_res *res)
 5986{
 5987	__u32 orig_val, new_val;
 5988	struct bpf_insn *insn;
 5989	int insn_idx;
 5990	__u8 class;
 5991
 5992	if (relo->insn_off % BPF_INSN_SZ)
 5993		return -EINVAL;
 5994	insn_idx = relo->insn_off / BPF_INSN_SZ;
 5995	/* adjust insn_idx from section frame of reference to the local
 5996	 * program's frame of reference; (sub-)program code is not yet
 5997	 * relocated, so it's enough to just subtract in-section offset
 5998	 */
 5999	insn_idx = insn_idx - prog->sec_insn_off;
 6000	insn = &prog->insns[insn_idx];
 6001	class = BPF_CLASS(insn->code);
 6002
 6003	if (res->poison) {
 6004poison:
 6005		/* poison second part of ldimm64 to avoid confusing error from
 6006		 * verifier about "unknown opcode 00"
 6007		 */
 6008		if (is_ldimm64_insn(insn))
 6009			bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
 6010		bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
 6011		return 0;
 6012	}
 6013
 6014	orig_val = res->orig_val;
 6015	new_val = res->new_val;
 6016
 6017	switch (class) {
 6018	case BPF_ALU:
 6019	case BPF_ALU64:
 6020		if (BPF_SRC(insn->code) != BPF_K)
 6021			return -EINVAL;
 6022		if (res->validate && insn->imm != orig_val) {
 6023			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
 6024				prog->name, relo_idx,
 6025				insn_idx, insn->imm, orig_val, new_val);
 6026			return -EINVAL;
 6027		}
 6028		orig_val = insn->imm;
 6029		insn->imm = new_val;
 6030		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
 6031			 prog->name, relo_idx, insn_idx,
 6032			 orig_val, new_val);
 6033		break;
 6034	case BPF_LDX:
 6035	case BPF_ST:
 6036	case BPF_STX:
 6037		if (res->validate && insn->off != orig_val) {
 6038			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
 6039				prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
 6040			return -EINVAL;
 6041		}
 6042		if (new_val > SHRT_MAX) {
 6043			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
 6044				prog->name, relo_idx, insn_idx, new_val);
 6045			return -ERANGE;
 6046		}
 6047		if (res->fail_memsz_adjust) {
 6048			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
 6049				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
 6050				prog->name, relo_idx, insn_idx);
 6051			goto poison;
 6052		}
 6053
 6054		orig_val = insn->off;
 6055		insn->off = new_val;
 6056		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
 6057			 prog->name, relo_idx, insn_idx, orig_val, new_val);
 6058
 6059		if (res->new_sz != res->orig_sz) {
 6060			int insn_bytes_sz, insn_bpf_sz;
 6061
 6062			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
 6063			if (insn_bytes_sz != res->orig_sz) {
 6064				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
 6065					prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
 6066				return -EINVAL;
 6067			}
 6068
 6069			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
 6070			if (insn_bpf_sz < 0) {
 6071				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
 6072					prog->name, relo_idx, insn_idx, res->new_sz);
 6073				return -EINVAL;
 6074			}
 6075
 6076			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
 6077			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
 6078				 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
 6079		}
 6080		break;
 6081	case BPF_LD: {
 6082		__u64 imm;
 6083
 6084		if (!is_ldimm64_insn(insn) ||
 6085		    insn[0].src_reg != 0 || insn[0].off != 0 ||
 6086		    insn_idx + 1 >= prog->insns_cnt ||
 6087		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
 6088		    insn[1].src_reg != 0 || insn[1].off != 0) {
 6089			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
 6090				prog->name, relo_idx, insn_idx);
 6091			return -EINVAL;
 6092		}
 6093
 6094		imm = insn[0].imm + ((__u64)insn[1].imm << 32);
 6095		if (res->validate && imm != orig_val) {
 6096			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
 6097				prog->name, relo_idx,
 6098				insn_idx, (unsigned long long)imm,
 6099				orig_val, new_val);
 6100			return -EINVAL;
 6101		}
 6102
 6103		insn[0].imm = new_val;
 6104		insn[1].imm = 0; /* currently only 32-bit values are supported */
 6105		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
 6106			 prog->name, relo_idx, insn_idx,
 6107			 (unsigned long long)imm, new_val);
 6108		break;
 6109	}
 6110	default:
 6111		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
 6112			prog->name, relo_idx, insn_idx, insn->code,
 6113			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
 6114		return -EINVAL;
 6115	}
 6116
 6117	return 0;
 6118}
 6119
 6120/* Output spec definition in the format:
 6121 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
 6122 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
 6123 */
 6124static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
 6125{
 6126	const struct btf_type *t;
 6127	const struct btf_enum *e;
 6128	const char *s;
 6129	__u32 type_id;
 6130	int i;
 6131
 6132	type_id = spec->root_type_id;
 6133	t = btf__type_by_id(spec->btf, type_id);
 6134	s = btf__name_by_offset(spec->btf, t->name_off);
 6135
 6136	libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
 6137
 6138	if (core_relo_is_type_based(spec->relo_kind))
 6139		return;
 6140
 6141	if (core_relo_is_enumval_based(spec->relo_kind)) {
 6142		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
 6143		e = btf_enum(t) + spec->raw_spec[0];
 6144		s = btf__name_by_offset(spec->btf, e->name_off);
 6145
 6146		libbpf_print(level, "::%s = %u", s, e->val);
 6147		return;
 6148	}
 6149
 6150	if (core_relo_is_field_based(spec->relo_kind)) {
 6151		for (i = 0; i < spec->len; i++) {
 6152			if (spec->spec[i].name)
 6153				libbpf_print(level, ".%s", spec->spec[i].name);
 6154			else if (i > 0 || spec->spec[i].idx > 0)
 6155				libbpf_print(level, "[%u]", spec->spec[i].idx);
 6156		}
 6157
 6158		libbpf_print(level, " (");
 6159		for (i = 0; i < spec->raw_len; i++)
 6160			libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
 6161
 6162		if (spec->bit_offset % 8)
 6163			libbpf_print(level, " @ offset %u.%u)",
 6164				     spec->bit_offset / 8, spec->bit_offset % 8);
 6165		else
 6166			libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
 6167		return;
 6168	}
 6169}
 6170
 6171static size_t bpf_core_hash_fn(const void *key, void *ctx)
 6172{
 6173	return (size_t)key;
 6174}
 6175
 6176static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
 6177{
 6178	return k1 == k2;
 6179}
 6180
 6181static void *u32_as_hash_key(__u32 x)
 6182{
 6183	return (void *)(uintptr_t)x;
 6184}
 6185
 6186/*
 6187 * CO-RE relocate single instruction.
 6188 *
 6189 * The outline and important points of the algorithm:
 6190 * 1. For given local type, find corresponding candidate target types.
 6191 *    Candidate type is a type with the same "essential" name, ignoring
 6192 *    everything after last triple underscore (___). E.g., `sample`,
 6193 *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
 6194 *    for each other. Names with triple underscore are referred to as
 6195 *    "flavors" and are useful, among other things, for specifying and
 6196 *    supporting incompatible variations of the same kernel struct, which
 6197 *    might differ between different kernel versions and/or build
 6198 *    configurations.
 6199 *
 6200 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
 6201 *    converter, when deduplicated BTF of a kernel still contains more than
 6202 *    one distinct type with the same name. In that case, ___2, ___3, etc.
 6203 *    are appended starting from the second name conflict. But struct flavors
 6204 *    are also useful when defined "locally", in the BPF program, to extract
 6205 *    the same data from incompatible changes between different kernel
 6206 *    versions/configurations. For instance, to handle field renames between
 6207 *    kernel versions, one can use two flavors of the struct name with the
 6208 *    same common name and use conditional relocations to extract that field,
 6209 *    depending on target kernel version.
 6210 * 2. For each candidate type, try to match local specification to this
 6211 *    candidate target type. Matching involves finding corresponding
 6212 *    high-level spec accessors, meaning that all named fields should match,
 6213 *    and all array accesses should be within the actual bounds. Also,
 6214 *    types should be compatible (see bpf_core_fields_are_compat for details).
 6215 * 3. It is supported and expected that there might be multiple flavors
 6216 *    matching the spec. As long as all the specs resolve to the same set of
 6217 *    offsets across all candidates, there is no error. If there is any
 6218 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 6219 *    imperfection of BTF deduplication, which can cause slight duplication of
 6220 *    the same BTF type, if some directly or indirectly referenced (by
 6221 *    pointer) type gets resolved to different actual types in different
 6222 *    object files. If such a situation occurs, deduplicated BTF will end up
 6223 *    with two (or more) structurally identical types, which differ only in
 6224 *    the types they refer to through pointers. This should be OK in most cases and
 6225 *    is not an error.
 6226 * 4. Candidate types search is performed by linearly scanning through all
 6227 *    types in target BTF. It is anticipated that this is overall more
 6228 *    efficient memory-wise and not significantly worse (if not better)
 6229 *    CPU-wise compared to prebuilding a map from all local type names to
 6230 *    a list of candidate type names. It's also sped up by caching resolved
 6231 *    list of matching candidates for each local "root" type ID that has at
 6232 *    least one bpf_core_relo associated with it. This list is shared
 6233 *    between multiple relocations for the same type ID and is updated as some
 6234 *    of the candidates are pruned due to structural incompatibility.
 6235 */
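/* Illustrative sketch (not part of libbpf; type and field names are
 * hypothetical): on the BPF side, two flavors plus a field existence check
 * can cope with a field rename across kernel versions:
 *
 *   struct kern_struct___old {
 *       int old_field;
 *   } __attribute__((preserve_access_index));
 *
 *   // assume struct kern_struct with 'new_field' comes from vmlinux.h
 *   static int get_field(struct kern_struct *s)
 *   {
 *       if (bpf_core_field_exists(s->new_field))
 *           return s->new_field;
 *       return ((struct kern_struct___old *)s)->old_field;
 *   }
 *
 * Both flavors share the essential name "kern_struct", so each relocation
 * is matched against the same kernel type, whichever field it actually has.
 */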
 6236static int bpf_core_apply_relo(struct bpf_program *prog,
 6237			       const struct bpf_core_relo *relo,
 6238			       int relo_idx,
 6239			       const struct btf *local_btf,
 6240			       struct hashmap *cand_cache)
 6241{
 6242	struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
 6243	const void *type_key = u32_as_hash_key(relo->type_id);
 6244	struct bpf_core_relo_res cand_res, targ_res;
 6245	const struct btf_type *local_type;
 6246	const char *local_name;
 6247	struct core_cand_list *cands = NULL;
 6248	__u32 local_id;
 6249	const char *spec_str;
 6250	int i, j, err;
 6251
 6252	local_id = relo->type_id;
 6253	local_type = btf__type_by_id(local_btf, local_id);
 6254	if (!local_type)
 6255		return -EINVAL;
 6256
 6257	local_name = btf__name_by_offset(local_btf, local_type->name_off);
 6258	if (!local_name)
 6259		return -EINVAL;
 6260
 6261	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
 6262	if (str_is_empty(spec_str))
 6263		return -EINVAL;
 6264
 6265	if (prog->obj->gen_loader) {
 6266		pr_warn("// TODO core_relo: prog %td insn[%d] %s %s kind %d\n",
 6267			prog - prog->obj->programs, relo->insn_off / 8,
 6268			local_name, spec_str, relo->kind);
 6269		return -ENOTSUP;
 6270	}
 6271	err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
 6272	if (err) {
 6273		pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
 6274			prog->name, relo_idx, local_id, btf_kind_str(local_type),
 6275			str_is_empty(local_name) ? "<anon>" : local_name,
 6276			spec_str, err);
 6277		return -EINVAL;
 6278	}
 6279
 6280	pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
 6281		 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
 6282	bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
 6283	libbpf_print(LIBBPF_DEBUG, "\n");
 6284
 6285	/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
 6286	if (relo->kind == BPF_TYPE_ID_LOCAL) {
 6287		targ_res.validate = true;
 6288		targ_res.poison = false;
 6289		targ_res.orig_val = local_spec.root_type_id;
 6290		targ_res.new_val = local_spec.root_type_id;
 6291		goto patch_insn;
 6292	}
 6293
 6294	/* libbpf doesn't support candidate search for anonymous types */
 6295	if (str_is_empty(spec_str)) {
 6296		pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
 6297			prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
 6298		return -EOPNOTSUPP;
 6299	}
 6300
 6301	if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
 6302		cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
 6303		if (IS_ERR(cands)) {
 6304			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
 6305				prog->name, relo_idx, local_id, btf_kind_str(local_type),
 6306				local_name, PTR_ERR(cands));
 6307			return PTR_ERR(cands);
 6308		}
 6309		err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
 6310		if (err) {
 6311			bpf_core_free_cands(cands);
 6312			return err;
 6313		}
 6314	}
 6315
 6316	for (i = 0, j = 0; i < cands->len; i++) {
 6317		err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
 6318					  cands->cands[i].id, &cand_spec);
 6319		if (err < 0) {
 6320			pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
 6321				prog->name, relo_idx, i);
 6322			bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
 6323			libbpf_print(LIBBPF_WARN, ": %d\n", err);
 6324			return err;
 6325		}
 6326
 6327		pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
 6328			 relo_idx, err == 0 ? "non-matching" : "matching", i);
 6329		bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
 6330		libbpf_print(LIBBPF_DEBUG, "\n");
 6331
 6332		if (err == 0)
 6333			continue;
 6334
 6335		err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
 6336		if (err)
 6337			return err;
 6338
 6339		if (j == 0) {
 6340			targ_res = cand_res;
 6341			targ_spec = cand_spec;
 6342		} else if (cand_spec.bit_offset != targ_spec.bit_offset) {
 6343			/* if there are many field relo candidates, they
 6344			 * should all resolve to the same bit offset
 6345			 */
 6346			pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
 6347				prog->name, relo_idx, cand_spec.bit_offset,
 6348				targ_spec.bit_offset);
 6349			return -EINVAL;
 6350		} else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
 6351			/* all candidates should result in the same relocation
 6352			 * decision and value, otherwise it's dangerous to
 6353			 * proceed due to ambiguity
 6354			 */
 6355			pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
 6356				prog->name, relo_idx,
 6357				cand_res.poison ? "failure" : "success", cand_res.new_val,
 6358				targ_res.poison ? "failure" : "success", targ_res.new_val);
 6359			return -EINVAL;
 6360		}
 6361
 6362		cands->cands[j++] = cands->cands[i];
 6363	}
 6364
 6365	/*
 6366	 * For a BPF_FIELD_EXISTS relo, or when the BPF program in use has field
 6367	 * existence checks or kernel version/config checks, it's expected
 6368	 * that we might not find any candidates. In this case, if the field
 6369	 * wasn't found in any candidate, the list of candidates shouldn't
 6370	 * change at all; we'll just handle the relocation appropriately,
 6371	 * depending on the relo's kind.
 6372	 */
 6373	if (j > 0)
 6374		cands->len = j;
 6375
 6376	/*
 6377	 * If no candidates were found, it might be either a programmer error
 6378	 * or an expected case, depending on whether the instruction with the
 6379	 * relocation is guarded in some way that makes it unreachable (dead
 6380	 * code) if relocation can't be resolved. This is handled in
 6381	 * bpf_core_patch_insn() uniformly by replacing that instruction with
 6382	 * BPF helper call insn (using invalid helper ID). If that instruction
 6383	 * is indeed unreachable, then it will be ignored and eliminated by
 6384	 * verifier. If it was an error, then verifier will complain and point
 6385	 * to a specific instruction number in its log.
 6386	 */
 6387	if (j == 0) {
 6388		pr_debug("prog '%s': relo #%d: no matching targets found\n",
 6389			 prog->name, relo_idx);
 6390
 6391		/* calculate single target relo result explicitly */
 6392		err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
 6393		if (err)
 6394			return err;
 6395	}
 6396
 6397patch_insn:
 6398	/* bpf_core_patch_insn() should know how to handle missing targ_spec */
 6399	err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
 6400	if (err) {
 6401		pr_warn("prog '%s': relo #%d: failed to patch insn #%zu: %d\n",
 6402			prog->name, relo_idx, relo->insn_off / BPF_INSN_SZ, err);
 6403		return -EINVAL;
 6404	}
 6405
 6406	return 0;
 6407}
 6408
 6409static int
 6410bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
 6411{
 6412	const struct btf_ext_info_sec *sec;
 6413	const struct bpf_core_relo *rec;
 6414	const struct btf_ext_info *seg;
 6415	struct hashmap_entry *entry;
 6416	struct hashmap *cand_cache = NULL;
 6417	struct bpf_program *prog;
 6418	const char *sec_name;
 6419	int i, err = 0, insn_idx, sec_idx;
 6420
 6421	if (obj->btf_ext->core_relo_info.len == 0)
 6422		return 0;
 6423
 6424	if (targ_btf_path) {
 6425		obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
 6426		err = libbpf_get_error(obj->btf_vmlinux_override);
 6427		if (err) {
 6428			pr_warn("failed to parse target BTF: %d\n", err);
 6429			return err;
 6430		}
 6431	}
 6432
 6433	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
 6434	if (IS_ERR(cand_cache)) {
 6435		err = PTR_ERR(cand_cache);
 6436		goto out;
 6437	}
 6438
 6439	seg = &obj->btf_ext->core_relo_info;
 6440	for_each_btf_ext_sec(seg, sec) {
 6441		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
 6442		if (str_is_empty(sec_name)) {
 6443			err = -EINVAL;
 6444			goto out;
 6445		}
 6446		/* bpf_object's ELF is gone by now so it's not easy to find
 6447		 * section index by section name, but we can find *any*
 6448		 * bpf_program within desired section name and use its
 6449		 * prog->sec_idx to do a proper search by section index and
 6450		 * instruction offset
 6451		 */
 6452		prog = NULL;
 6453		for (i = 0; i < obj->nr_programs; i++) {
 6454			prog = &obj->programs[i];
 6455			if (strcmp(prog->sec_name, sec_name) == 0)
 6456				break;
 6457		}
 6458		if (!prog) {
 6459			pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
 6460			return -ENOENT;
 6461		}
 6462		sec_idx = prog->sec_idx;
 6463
 6464		pr_debug("sec '%s': found %d CO-RE relocations\n",
 6465			 sec_name, sec->num_info);
 6466
 6467		for_each_btf_ext_rec(seg, sec, i, rec) {
 6468			insn_idx = rec->insn_off / BPF_INSN_SZ;
 6469			prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
 6470			if (!prog) {
 6471				pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
 6472					sec_name, insn_idx, i);
 6473				err = -EINVAL;
 6474				goto out;
 6475			}
 6476			/* no need to apply CO-RE relocation if the program is
 6477			 * not going to be loaded
 6478			 */
 6479			if (!prog->load)
 6480				continue;
 6481
 6482			err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
 6483			if (err) {
 6484				pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
 6485					prog->name, i, err);
 6486				goto out;
 6487			}
 6488		}
 6489	}
 6490
 6491out:
 6492	/* obj->btf_vmlinux and module BTFs are freed after object load */
 6493	btf__free(obj->btf_vmlinux_override);
 6494	obj->btf_vmlinux_override = NULL;
 6495
 6496	if (!IS_ERR_OR_NULL(cand_cache)) {
 6497		hashmap__for_each_entry(cand_cache, entry, i) {
 6498			bpf_core_free_cands(entry->value);
 6499		}
 6500		hashmap__free(cand_cache);
 6501	}
 6502	return err;
 6503}
 6504
 6505/* Relocate data references within program code:
 6506 *  - map references;
 6507 *  - global variable references;
 6508 *  - extern references.
 6509 */
 6510static int
 6511bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 6512{
 6513	int i;
 6514
 6515	for (i = 0; i < prog->nr_reloc; i++) {
 6516		struct reloc_desc *relo = &prog->reloc_desc[i];
 6517		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
 6518		struct extern_desc *ext;
 6519
 6520		switch (relo->type) {
 6521		case RELO_LD64:
 6522			if (obj->gen_loader) {
 6523				insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
 6524				insn[0].imm = relo->map_idx;
 6525			} else {
 6526				insn[0].src_reg = BPF_PSEUDO_MAP_FD;
 6527				insn[0].imm = obj->maps[relo->map_idx].fd;
 6528			}
 6529			break;
 6530		case RELO_DATA:
 6531			insn[1].imm = insn[0].imm + relo->sym_off;
 6532			if (obj->gen_loader) {
 6533				insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
 6534				insn[0].imm = relo->map_idx;
 6535			} else {
 6536				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
 6537				insn[0].imm = obj->maps[relo->map_idx].fd;
 6538			}
 6539			break;
 6540		case RELO_EXTERN_VAR:
 6541			ext = &obj->externs[relo->sym_off];
 6542			if (ext->type == EXT_KCFG) {
 6543				if (obj->gen_loader) {
 6544					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
 6545					insn[0].imm = obj->kconfig_map_idx;
 6546				} else {
 6547					insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
 6548					insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
 6549				}
 6550				insn[1].imm = ext->kcfg.data_off;
 6551			} else /* EXT_KSYM */ {
 6552				if (ext->ksym.type_id) { /* typed ksyms */
 6553					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
 6554					insn[0].imm = ext->ksym.kernel_btf_id;
 6555					insn[1].imm = ext->ksym.kernel_btf_obj_fd;
 6556				} else { /* typeless ksyms */
 6557					insn[0].imm = (__u32)ext->ksym.addr;
 6558					insn[1].imm = ext->ksym.addr >> 32;
 6559				}
 6560			}
 6561			break;
 6562		case RELO_EXTERN_FUNC:
 6563			ext = &obj->externs[relo->sym_off];
 6564			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
 6565			insn[0].imm = ext->ksym.kernel_btf_id;
 6566			break;
 6567		case RELO_SUBPROG_ADDR:
 6568			if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
 6569				pr_warn("prog '%s': relo #%d: bad insn\n",
 6570					prog->name, i);
 6571				return -EINVAL;
 6572			}
 6573			/* handled already */
 6574			break;
 6575		case RELO_CALL:
 6576			/* handled already */
 6577			break;
 6578		default:
 6579			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
 6580				prog->name, i, relo->type);
 6581			return -EINVAL;
 6582		}
 6583	}
 6584
 6585	return 0;
 6586}
 6587
 6588static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
 6589				    const struct bpf_program *prog,
 6590				    const struct btf_ext_info *ext_info,
 6591				    void **prog_info, __u32 *prog_rec_cnt,
 6592				    __u32 *prog_rec_sz)
 6593{
 6594	void *copy_start = NULL, *copy_end = NULL;
 6595	void *rec, *rec_end, *new_prog_info;
 6596	const struct btf_ext_info_sec *sec;
 6597	size_t old_sz, new_sz;
 6598	const char *sec_name;
 6599	int i, off_adj;
 6600
 6601	for_each_btf_ext_sec(ext_info, sec) {
 6602		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
 6603		if (!sec_name)
 6604			return -EINVAL;
 6605		if (strcmp(sec_name, prog->sec_name) != 0)
 6606			continue;
 6607
 6608		for_each_btf_ext_rec(ext_info, sec, i, rec) {
 6609			__u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
 6610
 6611			if (insn_off < prog->sec_insn_off)
 6612				continue;
 6613			if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
 6614				break;
 6615
 6616			if (!copy_start)
 6617				copy_start = rec;
 6618			copy_end = rec + ext_info->rec_size;
 6619		}
 6620
 6621		if (!copy_start)
 6622			return -ENOENT;
 6623
 6624		/* append func/line info of a given (sub-)program to the main
 6625		 * program func/line info
 6626		 */
 6627		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
 6628		new_sz = old_sz + (copy_end - copy_start);
 6629		new_prog_info = realloc(*prog_info, new_sz);
 6630		if (!new_prog_info)
 6631			return -ENOMEM;
 6632		*prog_info = new_prog_info;
 6633		*prog_rec_cnt = new_sz / ext_info->rec_size;
 6634		memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
 6635
 6636		/* Kernel instruction offsets are in units of 8-byte
 6637		 * instructions, while .BTF.ext instruction offsets generated
 6638		 * by Clang are in units of bytes. So convert Clang offsets
 6639		 * into kernel offsets and adjust offset according to program
 6640		 * relocated position.
 6641		 */
 6642		off_adj = prog->sub_insn_off - prog->sec_insn_off;
 6643		rec = new_prog_info + old_sz;
 6644		rec_end = new_prog_info + new_sz;
 6645		for (; rec < rec_end; rec += ext_info->rec_size) {
 6646			__u32 *insn_off = rec;
 6647
 6648			*insn_off = *insn_off / BPF_INSN_SZ + off_adj;
 6649		}
 6650		*prog_rec_sz = ext_info->rec_size;
 6651		return 0;
 6652	}
 6653
 6654	return -ENOENT;
 6655}
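/* A worked example of the conversion above, with illustrative numbers:
 * suppose a subprog sits at section instruction #10 (sec_insn_off = 10)
 * and was appended into the main program at instruction #100
 * (sub_insn_off = 100), so off_adj = 90. A .BTF.ext record emitted by
 * Clang with a byte offset of 96 refers to section instruction
 * 96 / BPF_INSN_SZ = #12, which after relocation lives at main-program
 * instruction #12 + 90 = #102.
 */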
 6656
 6657static int
 6658reloc_prog_func_and_line_info(const struct bpf_object *obj,
 6659			      struct bpf_program *main_prog,
 6660			      const struct bpf_program *prog)
 6661{
 6662	int err;
 6663
 6664	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
 6665	 * support func/line info
 6666	 */
 6667	if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
 6668		return 0;
 6669
 6670	/* only attempt func info relocation if main program's func_info
 6671	 * relocation was successful
 6672	 */
 6673	if (main_prog != prog && !main_prog->func_info)
 6674		goto line_info;
 6675
 6676	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
 6677				       &main_prog->func_info,
 6678				       &main_prog->func_info_cnt,
 6679				       &main_prog->func_info_rec_size);
 6680	if (err) {
 6681		if (err != -ENOENT) {
 6682			pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
 6683				prog->name, err);
 6684			return err;
 6685		}
 6686		if (main_prog->func_info) {
 6687			/*
 6688			 * Some info has already been found, but this btf_ext
 6689			 * reloc had a problem, so we must error out.
 6690			 */
 6691			pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
 6692			return err;
 6693		}
 6694		/* There was a problem loading the very first info. Ignore the rest. */
 6695		pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
 6696			prog->name);
 6697	}
 6698
 6699line_info:
 6700	/* don't relocate line info if main program's relocation failed */
 6701	if (main_prog != prog && !main_prog->line_info)
 6702		return 0;
 6703
 6704	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
 6705				       &main_prog->line_info,
 6706				       &main_prog->line_info_cnt,
 6707				       &main_prog->line_info_rec_size);
 6708	if (err) {
 6709		if (err != -ENOENT) {
 6710			pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
 6711				prog->name, err);
 6712			return err;
 6713		}
 6714		if (main_prog->line_info) {
 6715			/*
 6716			 * Some info has already been found, but this btf_ext
 6717			 * reloc had a problem, so we must error out.
 6718			 */
 6719			pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
 6720			return err;
 6721		}
 6722		/* There was a problem loading the very first info. Ignore the rest. */
 6723		pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
 6724			prog->name);
 6725	}
 6726	return 0;
 6727}
 6728
 6729static int cmp_relo_by_insn_idx(const void *key, const void *elem)
 6730{
 6731	size_t insn_idx = *(const size_t *)key;
 6732	const struct reloc_desc *relo = elem;
 6733
 6734	if (insn_idx == relo->insn_idx)
 6735		return 0;
 6736	return insn_idx < relo->insn_idx ? -1 : 1;
 6737}
 6738
 6739static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
 6740{
 6741	return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
 6742		       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
 6743}
 6744
 6745static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
 6746{
 6747	int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
 6748	struct reloc_desc *relos;
 6749	int i;
 6750
 6751	if (main_prog == subprog)
 6752		return 0;
 6753	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
 6754	if (!relos)
 6755		return -ENOMEM;
 6756	memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
 6757	       sizeof(*relos) * subprog->nr_reloc);
 6758
 6759	for (i = main_prog->nr_reloc; i < new_cnt; i++)
 6760		relos[i].insn_idx += subprog->sub_insn_off;
 6761	/* After insn_idx adjustment the 'relos' array is still sorted
 6762	 * by insn_idx and doesn't break bsearch.
 6763	 */
 6764	main_prog->reloc_desc = relos;
 6765	main_prog->nr_reloc = new_cnt;
 6766	return 0;
 6767}
 6768
 6769static int
 6770bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
 6771		       struct bpf_program *prog)
 6772{
 6773	size_t sub_insn_idx, insn_idx, new_cnt;
 6774	struct bpf_program *subprog;
 6775	struct bpf_insn *insns, *insn;
 6776	struct reloc_desc *relo;
 6777	int err;
 6778
 6779	err = reloc_prog_func_and_line_info(obj, main_prog, prog);
 6780	if (err)
 6781		return err;
 6782
 6783	for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
 6784		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
 6785		if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
 6786			continue;
 6787
 6788		relo = find_prog_insn_relo(prog, insn_idx);
 6789		if (relo && relo->type == RELO_EXTERN_FUNC)
 6790			/* kfunc relocations will be handled later
 6791			 * in bpf_object__relocate_data()
 6792			 */
 6793			continue;
 6794		if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
 6795			pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
 6796				prog->name, insn_idx, relo->type);
 6797			return -LIBBPF_ERRNO__RELOC;
 6798		}
 6799		if (relo) {
 6800			/* sub-program instruction index is a combination of
 6801			 * an offset of a symbol pointed to by relocation and
 6802			 * call instruction's imm field; for global functions,
 6803			 * call always has imm = -1, but for static functions
 6804			 * relocation is against STT_SECTION and insn->imm
 6805			 * points to a start of a static function
 6806			 *
 6807			 * for subprog addr relocation, the relo->sym_off + insn->imm is
 6808			 * the byte offset in the corresponding section.
 6809			 */
 6810			if (relo->type == RELO_CALL)
 6811				sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
 6812			else
 6813				sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
 6814		} else if (insn_is_pseudo_func(insn)) {
 6815			/*
 6816			 * RELO_SUBPROG_ADDR relo is always emitted even if both
 6817			 * functions are in the same section, so it shouldn't reach here.
 6818			 */
 6819			pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
 6820				prog->name, insn_idx);
 6821			return -LIBBPF_ERRNO__RELOC;
 6822		} else {
 6823			/* if subprogram call is to a static function within
 6824			 * the same ELF section, there won't be any relocation
 6825			 * emitted, but it also means there is no additional
 6826			 * offset necessary, insns->imm is relative to
 6827			 * instruction's original position within the section
 6828			 */
 6829			sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
 6830		}
 6831
 6832		/* we enforce that sub-programs should be in .text section */
 6833		subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
 6834		if (!subprog) {
 6835			pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
 6836				prog->name);
 6837			return -LIBBPF_ERRNO__RELOC;
 6838		}
 6839
 6840		/* if it's the first call instruction calling into this
 6841		 * subprogram (meaning this subprog hasn't been processed
 6842		 * yet) within the context of current main program:
 6843		 *   - append it at the end of main program's instructions block;
 6844		 *   - process it recursively, while current program is put on hold;
 6845		 *   - if that subprogram calls some other not yet processed
 6846		 *   subprogram, the same thing happens recursively until
 6847		 *   there are no more unprocessed subprograms left to append
 6848		 *   and relocate.
 6849		 */
 6850		if (subprog->sub_insn_off == 0) {
 6851			subprog->sub_insn_off = main_prog->insns_cnt;
 6852
 6853			new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
 6854			insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
 6855			if (!insns) {
 6856				pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
 6857				return -ENOMEM;
 6858			}
 6859			main_prog->insns = insns;
 6860			main_prog->insns_cnt = new_cnt;
 6861
 6862			memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
 6863			       subprog->insns_cnt * sizeof(*insns));
 6864
 6865			pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
 6866				 main_prog->name, subprog->insns_cnt, subprog->name);
 6867
 6868			/* The subprog insns are now appended. Append its relos too. */
 6869			err = append_subprog_relos(main_prog, subprog);
 6870			if (err)
 6871				return err;
 6872			err = bpf_object__reloc_code(obj, main_prog, subprog);
 6873			if (err)
 6874				return err;
 6875		}
 6876
 6877		/* main_prog->insns memory could have been re-allocated, so
 6878		 * calculate pointer again
 6879		 */
 6880		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
 6881		/* calculate correct instruction position within current main
 6882		 * prog; each main prog can have a different set of
 6883		 * subprograms appended (potentially in different order as
 6884		 * well), so position of any subprog can be different for
 6885		 * different main programs */
 6886		insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
 6887
 6888		pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
 6889			 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
 6890	}
 6891
 6892	return 0;
 6893}
 6894
 6895/*
 6896 * Relocate sub-program calls.
 6897 *
 6898 * Algorithm operates as follows. Each entry-point BPF program (referred to as
 6899 * main prog) is processed separately. Each subprog (a non-entry function,
 6900 * which can be called from either entry progs or other subprogs) gets its
 6901 * sub_insn_off reset to zero. This serves as an indicator that this
 6902 * subprogram hasn't yet been appended and relocated within the current
 6903 * main prog. Once it's relocated, sub_insn_off will point at the position
 6904 * within the current main prog where the subprog was appended. This is
 6905 * further used to relocate all the call instructions jumping into it.
 6906 *
 6907 * We start with main program and process all call instructions. If the call
 6908 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
 6909 * is zero), subprog instructions are appended at the end of main program's
 6910 * instruction array. Then main program is "put on hold" while we recursively
 6911 * process newly appended subprogram. If that subprogram calls into another
 6912 * subprogram that hasn't been appended, new subprogram is appended again to
 6913 * the *main* prog's instructions (subprog's instructions are always left
 6914 * untouched, as they need to be in unmodified state for subsequent main progs
 6915 * and subprog instructions are always sent only as part of a main prog) and
 6916 * the process continues recursively. Once all the subprogs called from a main
 6917 * prog or any of its subprogs are appended (and relocated), all their
 6918 * positions within finalized instructions array are known, so it's easy to
 6919 * rewrite call instructions with correct relative offsets, corresponding to
 6920 * desired target subprog.
 6921 *
 6922 * It's important to realize that some subprogs might not be called from
 6923 * a given main prog or any of its called/used subprogs. Those will keep their
 6924 * subprog->sub_insn_off as zero at all times and won't be appended to current
 6925 * main prog and won't be relocated within the context of current main prog.
 6926 * They might still be used from other main progs later.
 6927 *
 6928 * Visually this process can be shown as below. Suppose we have two main
 6929 * programs mainA and mainB and BPF object contains three subprogs: subA,
 6930 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
 6931 * subC both call subB:
 6932 *
 6933 *        +--------+ +-------+
 6934 *        |        v v       |
 6935 *     +--+---+ +--+-+-+ +---+--+
 6936 *     | subA | | subB | | subC |
 6937 *     +--+---+ +------+ +---+--+
 6938 *        ^                  ^
 6939 *        |                  |
 6940 *    +---+-------+   +------+----+
 6941 *    |   mainA   |   |   mainB   |
 6942 *    +-----------+   +-----------+
 6943 *
 6944 * We'll start relocating mainA, will find subA, append it and start
 6945 * processing sub A recursively:
 6946 *
 6947 *    +-----------+------+
 6948 *    |   mainA   | subA |
 6949 *    +-----------+------+
 6950 *
 6951 * At this point we notice that subB is used from subA, so we append it and
 6952 * relocate (there are no further subcalls from subB):
 6953 *
 6954 *    +-----------+------+------+
 6955 *    |   mainA   | subA | subB |
 6956 *    +-----------+------+------+
 6957 *
 6958 * At this point, we relocate subA calls, then go one level up and finish with
 6959 * relocating mainA calls. mainA is done.
 6960 *
 6961 * For mainB the process is similar but results in a different order. We start with
 6962 * mainB and skip subA and subB, as mainB never calls them (at least
 6963 * directly), but we see subC is needed, so we append and start processing it:
 6964 *
 6965 *    +-----------+------+
 6966 *    |   mainB   | subC |
 6967 *    +-----------+------+
 6968 * Now we see subC needs subB, so we go back to it, append and relocate it:
 6969 *
 6970 *    +-----------+------+------+
 6971 *    |   mainB   | subC | subB |
 6972 *    +-----------+------+------+
 6973 *
 6974 * At this point we unwind recursion, relocate calls in subC, then in mainB.
 6975 */
 6976static int
 6977bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
 6978{
 6979	struct bpf_program *subprog;
 6980	int i, err;
 6981
 6982	/* mark all subprogs as not relocated (yet) within the context of
 6983	 * current main program
 6984	 */
 6985	for (i = 0; i < obj->nr_programs; i++) {
 6986		subprog = &obj->programs[i];
 6987		if (!prog_is_subprog(obj, subprog))
 6988			continue;
 6989
 6990		subprog->sub_insn_off = 0;
 6991	}
 6992
 6993	err = bpf_object__reloc_code(obj, prog, prog);
 6994	if (err)
 6995		return err;
 6996
 6997
 6998	return 0;
 6999}
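/* A worked example of the call fixup done in bpf_object__reloc_code(),
 * using the mainA/subA layout from the diagram above (sizes are
 * illustrative assumptions): if mainA is 20 insns long, subA gets
 * appended at sub_insn_off = 20. A call to subA at mainA's insn #5 is
 * then rewritten as
 *
 *	insn->imm = 20 - (0 + 5) - 1 = 14
 *
 * since a BPF call jumps to insn_idx + imm + 1, i.e. 5 + 14 + 1 = 20,
 * which is exactly where subA now starts.
 */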
 7000
 7001static void
 7002bpf_object__free_relocs(struct bpf_object *obj)
 7003{
 7004	struct bpf_program *prog;
 7005	int i;
 7006
 7007	/* free up relocation descriptors */
 7008	for (i = 0; i < obj->nr_programs; i++) {
 7009		prog = &obj->programs[i];
 7010		zfree(&prog->reloc_desc);
 7011		prog->nr_reloc = 0;
 7012	}
 7013}
 7014
 7015static int
 7016bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
 7017{
 7018	struct bpf_program *prog;
 7019	size_t i, j;
 7020	int err;
 7021
 7022	if (obj->btf_ext) {
 7023		err = bpf_object__relocate_core(obj, targ_btf_path);
 7024		if (err) {
 7025			pr_warn("failed to perform CO-RE relocations: %d\n",
 7026				err);
 7027			return err;
 7028		}
 7029	}
 7030
 7031	/* Before relocating calls, pre-process relocations and mark
 7032	 * the few ld_imm64 instructions that point to subprogs.
 7033	 * Otherwise bpf_object__reloc_code() later would have to consider
 7034	 * all ld_imm64 insns as relocation candidates. That would
 7035	 * slow relocation down, since the number of find_prog_insn_relo()
 7036	 * calls would increase and most of them would fail to find a relo.
 7037	 */
 7038	for (i = 0; i < obj->nr_programs; i++) {
 7039		prog = &obj->programs[i];
 7040		for (j = 0; j < prog->nr_reloc; j++) {
 7041			struct reloc_desc *relo = &prog->reloc_desc[j];
 7042			struct bpf_insn *insn = &prog->insns[relo->insn_idx];
 7043
 7044			/* mark the insn, so it's recognized by insn_is_pseudo_func() */
 7045			if (relo->type == RELO_SUBPROG_ADDR)
 7046				insn[0].src_reg = BPF_PSEUDO_FUNC;
 7047		}
 7048	}
 7049
 7050	/* relocate subprogram calls and append used subprograms to main
 7051	 * programs; each copy of subprogram code needs to be relocated
 7052	 * differently for each main program, because its code location might
 7053	 * have changed.
 7054	 * Append subprog relos to main programs to allow data relos to be
 7055	 * processed after text is completely relocated.
 7056	 */
 7057	for (i = 0; i < obj->nr_programs; i++) {
 7058		prog = &obj->programs[i];
 7059		/* sub-program's sub-calls are relocated within the context of
 7060		 * its main program only
 7061		 */
 7062		if (prog_is_subprog(obj, prog))
 7063			continue;
 7064
 7065		err = bpf_object__relocate_calls(obj, prog);
 7066		if (err) {
 7067			pr_warn("prog '%s': failed to relocate calls: %d\n",
 7068				prog->name, err);
 7069			return err;
 7070		}
 7071	}
 7072	/* Process data relos for main programs */
 7073	for (i = 0; i < obj->nr_programs; i++) {
 7074		prog = &obj->programs[i];
 7075		if (prog_is_subprog(obj, prog))
 7076			continue;
 7077		err = bpf_object__relocate_data(obj, prog);
 7078		if (err) {
 7079			pr_warn("prog '%s': failed to relocate data references: %d\n",
 7080				prog->name, err);
 7081			return err;
 7082		}
 7083	}
 7084	if (!obj->gen_loader)
 7085		bpf_object__free_relocs(obj);
 7086	return 0;
 7087}
 7088
 7089static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 7090					    GElf_Shdr *shdr, Elf_Data *data);
 7091
 7092static int bpf_object__collect_map_relos(struct bpf_object *obj,
 7093					 GElf_Shdr *shdr, Elf_Data *data)
 7094{
 7095	const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
 7096	int i, j, nrels, new_sz;
 7097	const struct btf_var_secinfo *vi = NULL;
 7098	const struct btf_type *sec, *var, *def;
 7099	struct bpf_map *map = NULL, *targ_map;
 7100	const struct btf_member *member;
 7101	const char *name, *mname;
 7102	Elf_Data *symbols;
 7103	unsigned int moff;
 7104	GElf_Sym sym;
 7105	GElf_Rel rel;
 7106	void *tmp;
 7107
 7108	if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
 7109		return -EINVAL;
 7110	sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
 7111	if (!sec)
 7112		return -EINVAL;
 7113
 7114	symbols = obj->efile.symbols;
 7115	nrels = shdr->sh_size / shdr->sh_entsize;
 7116	for (i = 0; i < nrels; i++) {
 7117		if (!gelf_getrel(data, i, &rel)) {
 7118			pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
 7119			return -LIBBPF_ERRNO__FORMAT;
 7120		}
 7121		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
 7122			pr_warn(".maps relo #%d: symbol %zx not found\n",
 7123				i, (size_t)GELF_R_SYM(rel.r_info));
 7124			return -LIBBPF_ERRNO__FORMAT;
 7125		}
 7126		name = elf_sym_str(obj, sym.st_name) ?: "<?>";
 7127		if (sym.st_shndx != obj->efile.btf_maps_shndx) {
 7128			pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
 7129				i, name);
 7130			return -LIBBPF_ERRNO__RELOC;
 7131		}
 7132
 7133		pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
 7134			 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
 7135			 (size_t)rel.r_offset, sym.st_name, name);
 7136
 7137		for (j = 0; j < obj->nr_maps; j++) {
 7138			map = &obj->maps[j];
 7139			if (map->sec_idx != obj->efile.btf_maps_shndx)
 7140				continue;
 7141
 7142			vi = btf_var_secinfos(sec) + map->btf_var_idx;
 7143			if (vi->offset <= rel.r_offset &&
 7144			    rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
 7145				break;
 7146		}
 7147		if (j == obj->nr_maps) {
 7148			pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
 7149				i, name, (size_t)rel.r_offset);
 7150			return -EINVAL;
 7151		}
 7152
 7153		if (!bpf_map_type__is_map_in_map(map->def.type))
 7154			return -EINVAL;
 7155		if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
 7156		    map->def.key_size != sizeof(int)) {
 7157			pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
 7158				i, map->name, sizeof(int));
 7159			return -EINVAL;
 7160		}
 7161
 7162		targ_map = bpf_object__find_map_by_name(obj, name);
 7163		if (!targ_map)
 7164			return -ESRCH;
 7165
 7166		var = btf__type_by_id(obj->btf, vi->type);
 7167		def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
 7168		if (btf_vlen(def) == 0)
 7169			return -EINVAL;
 7170		member = btf_members(def) + btf_vlen(def) - 1;
 7171		mname = btf__name_by_offset(obj->btf, member->name_off);
 7172		if (strcmp(mname, "values"))
 7173			return -EINVAL;
 7174
 7175		moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
 7176		if (rel.r_offset - vi->offset < moff)
 7177			return -EINVAL;
 7178
 7179		moff = rel.r_offset - vi->offset - moff;
 7180		/* here we use BPF pointer size, which is always 64 bit, as we
 7181		 * are parsing ELF that was built for BPF target
 7182		 */
 7183		if (moff % bpf_ptr_sz)
 7184			return -EINVAL;
 7185		moff /= bpf_ptr_sz;
 7186		if (moff >= map->init_slots_sz) {
 7187			new_sz = moff + 1;
 7188			tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
 7189			if (!tmp)
 7190				return -ENOMEM;
 7191			map->init_slots = tmp;
 7192			memset(map->init_slots + map->init_slots_sz, 0,
 7193			       (new_sz - map->init_slots_sz) * host_ptr_sz);
 7194			map->init_slots_sz = new_sz;
 7195		}
 7196		map->init_slots[moff] = targ_map;
 7197
 7198		pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
 7199			 i, map->name, moff, name);
 7200	}
 7201
 7202	return 0;
 7203}
 7204
 7205static int cmp_relocs(const void *_a, const void *_b)
 7206{
 7207	const struct reloc_desc *a = _a;
 7208	const struct reloc_desc *b = _b;
 7209
 7210	if (a->insn_idx != b->insn_idx)
 7211		return a->insn_idx < b->insn_idx ? -1 : 1;
 7212
 7213	/* no two relocations should have the same insn_idx, but ... */
 7214	if (a->type != b->type)
 7215		return a->type < b->type ? -1 : 1;
 7216
 7217	return 0;
 7218}
 7219
 7220static int bpf_object__collect_relos(struct bpf_object *obj)
 7221{
 7222	int i, err;
 7223
 7224	for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
 7225		GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
 7226		Elf_Data *data = obj->efile.reloc_sects[i].data;
 7227		int idx = shdr->sh_info;
 7228
 7229		if (shdr->sh_type != SHT_REL) {
 7230			pr_warn("internal error at %d\n", __LINE__);
 7231			return -LIBBPF_ERRNO__INTERNAL;
 7232		}
 7233
 7234		if (idx == obj->efile.st_ops_shndx)
 7235			err = bpf_object__collect_st_ops_relos(obj, shdr, data);
 7236		else if (idx == obj->efile.btf_maps_shndx)
 7237			err = bpf_object__collect_map_relos(obj, shdr, data);
 7238		else
 7239			err = bpf_object__collect_prog_relos(obj, shdr, data);
 7240		if (err)
 7241			return err;
 7242	}
 7243
 7244	for (i = 0; i < obj->nr_programs; i++) {
 7245		struct bpf_program *p = &obj->programs[i];
 7246
 7247		if (!p->nr_reloc)
 7248			continue;
 7249
 7250		qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
 7251	}
 7252	return 0;
 7253}
 7254
 7255static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
 7256{
 7257	if (BPF_CLASS(insn->code) == BPF_JMP &&
 7258	    BPF_OP(insn->code) == BPF_CALL &&
 7259	    BPF_SRC(insn->code) == BPF_K &&
 7260	    insn->src_reg == 0 &&
 7261	    insn->dst_reg == 0) {
 7262		    *func_id = insn->imm;
 7263		    return true;
 7264	}
 7265	return false;
 7266}
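/* For reference, a minimal example of an instruction the check above
 * matches (a sketch; any enum bpf_func_id value could stand in for the
 * helper): a helper call is BPF_JMP|BPF_CALL with a BPF_K source
 * (BPF_SRC(code) == 0) and both registers zero, the helper ID in imm.
 */
static const struct bpf_insn helper_call_insn_example = {
	.code = BPF_JMP | BPF_CALL,
	.dst_reg = 0,
	.src_reg = 0,
	.off = 0,
	.imm = BPF_FUNC_map_lookup_elem,
};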
 7267
 7268static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
 7269{
 7270	struct bpf_insn *insn = prog->insns;
 7271	enum bpf_func_id func_id;
 7272	int i;
 7273
 7274	if (obj->gen_loader)
 7275		return 0;
 7276
 7277	for (i = 0; i < prog->insns_cnt; i++, insn++) {
 7278		if (!insn_is_helper_call(insn, &func_id))
 7279			continue;
 7280
 7281		/* on kernels that don't yet support
 7282		 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
 7283		 * to bpf_probe_read() which works well for old kernels
 7284		 */
 7285		switch (func_id) {
 7286		case BPF_FUNC_probe_read_kernel:
 7287		case BPF_FUNC_probe_read_user:
 7288			if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
 7289				insn->imm = BPF_FUNC_probe_read;
 7290			break;
 7291		case BPF_FUNC_probe_read_kernel_str:
 7292		case BPF_FUNC_probe_read_user_str:
 7293			if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
 7294				insn->imm = BPF_FUNC_probe_read_str;
 7295			break;
 7296		default:
 7297			break;
 7298		}
 7299	}
 7300	return 0;
 7301}
 7302
 7303static int
 7304load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 7305	     char *license, __u32 kern_version, int *pfd)
 7306{
 7307	struct bpf_prog_load_params load_attr = {};
 7308	char *cp, errmsg[STRERR_BUFSIZE];
 7309	size_t log_buf_size = 0;
 7310	char *log_buf = NULL;
 7311	int btf_fd, ret;
 7312
 7313	if (prog->type == BPF_PROG_TYPE_UNSPEC) {
 7314		/*
 7315		 * The program type must be set.  Most likely we couldn't find a proper
 7316		 * section definition at load time, and thus we didn't infer the type.
 7317		 */
 7318		pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
 7319			prog->name, prog->sec_name);
 7320		return -EINVAL;
 7321	}
 7322
 7323	if (!insns || !insns_cnt)
 7324		return -EINVAL;
 7325
 7326	load_attr.prog_type = prog->type;
 7327	/* old kernels might not support specifying expected_attach_type */
 7328	if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
 7329	    prog->sec_def->is_exp_attach_type_optional)
 7330		load_attr.expected_attach_type = 0;
 7331	else
 7332		load_attr.expected_attach_type = prog->expected_attach_type;
 7333	if (kernel_supports(prog->obj, FEAT_PROG_NAME))
 7334		load_attr.name = prog->name;
 7335	load_attr.insns = insns;
 7336	load_attr.insn_cnt = insns_cnt;
 7337	load_attr.license = license;
 7338	load_attr.attach_btf_id = prog->attach_btf_id;
 7339	if (prog->attach_prog_fd)
 7340		load_attr.attach_prog_fd = prog->attach_prog_fd;
 7341	else
 7342		load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
 7344	load_attr.kern_version = kern_version;
 7345	load_attr.prog_ifindex = prog->prog_ifindex;
 7346
 7347	/* specify func_info/line_info only if kernel supports them */
 7348	btf_fd = bpf_object__btf_fd(prog->obj);
 7349	if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
 7350		load_attr.prog_btf_fd = btf_fd;
 7351		load_attr.func_info = prog->func_info;
 7352		load_attr.func_info_rec_size = prog->func_info_rec_size;
 7353		load_attr.func_info_cnt = prog->func_info_cnt;
 7354		load_attr.line_info = prog->line_info;
 7355		load_attr.line_info_rec_size = prog->line_info_rec_size;
 7356		load_attr.line_info_cnt = prog->line_info_cnt;
 7357	}
 7358	load_attr.log_level = prog->log_level;
 7359	load_attr.prog_flags = prog->prog_flags;
 7360
 7361	if (prog->obj->gen_loader) {
 7362		bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
 7363				   prog - prog->obj->programs);
 7364		*pfd = -1;
 7365		return 0;
 7366	}
 7367retry_load:
 7368	if (log_buf_size) {
 7369		log_buf = malloc(log_buf_size);
 7370		if (!log_buf)
 7371			return -ENOMEM;
 7372
 7373		*log_buf = 0;
 7374	}
 7375
 7376	load_attr.log_buf = log_buf;
 7377	load_attr.log_buf_sz = log_buf_size;
 7378	ret = libbpf__bpf_prog_load(&load_attr);
 7379
 7380	if (ret >= 0) {
 7381		if (log_buf && load_attr.log_level)
 7382			pr_debug("verifier log:\n%s", log_buf);
 7383
 7384		if (prog->obj->rodata_map_idx >= 0 &&
 7385		    kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
 7386			struct bpf_map *rodata_map =
 7387				&prog->obj->maps[prog->obj->rodata_map_idx];
 7388
 7389			if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
 7390				cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
 7391				pr_warn("prog '%s': failed to bind .rodata map: %s\n",
 7392					prog->name, cp);
 7393				/* Don't fail hard if can't bind rodata. */
 7394			}
 7395		}
 7396
 7397		*pfd = ret;
 7398		ret = 0;
 7399		goto out;
 7400	}
 7401
 7402	if (!log_buf || errno == ENOSPC) {
 7403		log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
 7404				   log_buf_size << 1);
 7405
 7406		free(log_buf);
 7407		goto retry_load;
 7408	}
 7409	ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
 7410	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
 7411	pr_warn("load bpf program failed: %s\n", cp);
 7412	pr_perm_msg(ret);
 7413
 7414	if (log_buf && log_buf[0] != '\0') {
 7415		ret = -LIBBPF_ERRNO__VERIFY;
 7416		pr_warn("-- BEGIN DUMP LOG ---\n");
 7417		pr_warn("\n%s\n", log_buf);
 7418		pr_warn("-- END LOG --\n");
 7419	} else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
 7420		pr_warn("Program too large (%zu insns), at most %d insns\n",
 7421			load_attr.insn_cnt, BPF_MAXINSNS);
 7422		ret = -LIBBPF_ERRNO__PROG2BIG;
 7423	} else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
 7424		/* Wrong program type? */
 7425		int fd;
 7426
 7427		load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
 7428		load_attr.expected_attach_type = 0;
 7429		load_attr.log_buf = NULL;
 7430		load_attr.log_buf_sz = 0;
 7431		fd = libbpf__bpf_prog_load(&load_attr);
 7432		if (fd >= 0) {
 7433			close(fd);
 7434			ret = -LIBBPF_ERRNO__PROGTYPE;
 7435			goto out;
 7436		}
 7437	}
 7438
 7439out:
 7440	free(log_buf);
 7441	return ret;
 7442}
 7443
 7444static int bpf_program__record_externs(struct bpf_program *prog)
 7445{
 7446	struct bpf_object *obj = prog->obj;
 7447	int i;
 7448
 7449	for (i = 0; i < prog->nr_reloc; i++) {
 7450		struct reloc_desc *relo = &prog->reloc_desc[i];
 7451		struct extern_desc *ext = &obj->externs[relo->sym_off];
 7452
 7453		switch (relo->type) {
 7454		case RELO_EXTERN_VAR:
 7455			if (ext->type != EXT_KSYM)
 7456				continue;
 7457			if (!ext->ksym.type_id) {
 7458				pr_warn("typeless ksym %s is not supported yet\n",
 7459					ext->name);
 7460				return -ENOTSUP;
 7461			}
 7462			bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
 7463					       relo->insn_idx);
 7464			break;
 7465		case RELO_EXTERN_FUNC:
 7466			bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
 7467					       relo->insn_idx);
 7468			break;
 7469		default:
 7470			continue;
 7471		}
 7472	}
 7473	return 0;
 7474}
 7475
 7476static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
 7477
 7478int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 7479{
 7480	int err = 0, fd, i;
 7481
 7482	if (prog->obj->loaded) {
 7483		pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
 7484		return libbpf_err(-EINVAL);
 7485	}
 7486
 7487	if ((prog->type == BPF_PROG_TYPE_TRACING ||
 7488	     prog->type == BPF_PROG_TYPE_LSM ||
 7489	     prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
 7490		int btf_obj_fd = 0, btf_type_id = 0;
 7491
 7492		err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
 7493		if (err)
 7494			return libbpf_err(err);
 7495
 7496		prog->attach_btf_obj_fd = btf_obj_fd;
 7497		prog->attach_btf_id = btf_type_id;
 7498	}
 7499
 7500	if (prog->instances.nr < 0 || !prog->instances.fds) {
 7501		if (prog->preprocessor) {
 7502			pr_warn("Internal error: can't load program '%s'\n",
 7503				prog->name);
 7504			return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
 7505		}
 7506
 7507		prog->instances.fds = malloc(sizeof(int));
 7508		if (!prog->instances.fds) {
 7509			pr_warn("Not enough memory for BPF fds\n");
 7510			return libbpf_err(-ENOMEM);
 7511		}
 7512		prog->instances.nr = 1;
 7513		prog->instances.fds[0] = -1;
 7514	}
 7515
 7516	if (!prog->preprocessor) {
 7517		if (prog->instances.nr != 1) {
 7518			pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
 7519				prog->name, prog->instances.nr);
 7520		}
 7521		if (prog->obj->gen_loader)
 7522			bpf_program__record_externs(prog);
 7523		err = load_program(prog, prog->insns, prog->insns_cnt,
 7524				   license, kern_ver, &fd);
 7525		if (!err)
 7526			prog->instances.fds[0] = fd;
 7527		goto out;
 7528	}
 7529
 7530	for (i = 0; i < prog->instances.nr; i++) {
 7531		struct bpf_prog_prep_result result;
 7532		bpf_program_prep_t preprocessor = prog->preprocessor;
 7533
 7534		memset(&result, 0, sizeof(result));
 7535		err = preprocessor(prog, i, prog->insns,
 7536				   prog->insns_cnt, &result);
 7537		if (err) {
 7538			pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
 7539				i, prog->name);
 7540			goto out;
 7541		}
 7542
 7543		if (!result.new_insn_ptr || !result.new_insn_cnt) {
 7544			pr_debug("Skip loading the %dth instance of program '%s'\n",
 7545				 i, prog->name);
 7546			prog->instances.fds[i] = -1;
 7547			if (result.pfd)
 7548				*result.pfd = -1;
 7549			continue;
 7550		}
 7551
 7552		err = load_program(prog, result.new_insn_ptr,
 7553				   result.new_insn_cnt, license, kern_ver, &fd);
 7554		if (err) {
 7555			pr_warn("Loading the %dth instance of program '%s' failed\n",
 7556				i, prog->name);
 7557			goto out;
 7558		}
 7559
 7560		if (result.pfd)
 7561			*result.pfd = fd;
 7562		prog->instances.fds[i] = fd;
 7563	}
 7564out:
 7565	if (err)
 7566		pr_warn("failed to load program '%s'\n", prog->name);
 7567	zfree(&prog->insns);
 7568	prog->insns_cnt = 0;
 7569	return libbpf_err(err);
 7570}
 7571
 7572static int
 7573bpf_object__load_progs(struct bpf_object *obj, int log_level)
 7574{
 7575	struct bpf_program *prog;
 7576	size_t i;
 7577	int err;
 7578
 7579	for (i = 0; i < obj->nr_programs; i++) {
 7580		prog = &obj->programs[i];
 7581		err = bpf_object__sanitize_prog(obj, prog);
 7582		if (err)
 7583			return err;
 7584	}
 7585
 7586	for (i = 0; i < obj->nr_programs; i++) {
 7587		prog = &obj->programs[i];
 7588		if (prog_is_subprog(obj, prog))
 7589			continue;
 7590		if (!prog->load) {
 7591			pr_debug("prog '%s': skipped loading\n", prog->name);
 7592			continue;
 7593		}
 7594		prog->log_level |= log_level;
 7595		err = bpf_program__load(prog, obj->license, obj->kern_version);
 7596		if (err)
 7597			return err;
 7598	}
 7599	if (obj->gen_loader)
 7600		bpf_object__free_relocs(obj);
 7601	return 0;
 7602}
 7603
 7604static const struct bpf_sec_def *find_sec_def(const char *sec_name);
 7605
 7606static struct bpf_object *
 7607__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 7608		   const struct bpf_object_open_opts *opts)
 7609{
 7610	const char *obj_name, *kconfig;
 7611	struct bpf_program *prog;
 7612	struct bpf_object *obj;
 7613	char tmp_name[64];
 7614	int err;
 7615
 7616	if (elf_version(EV_CURRENT) == EV_NONE) {
 7617		pr_warn("failed to init libelf for %s\n",
 7618			path ? : "(mem buf)");
 7619		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
 7620	}
 7621
 7622	if (!OPTS_VALID(opts, bpf_object_open_opts))
 7623		return ERR_PTR(-EINVAL);
 7624
 7625	obj_name = OPTS_GET(opts, object_name, NULL);
 7626	if (obj_buf) {
 7627		if (!obj_name) {
 7628			snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
 7629				 (unsigned long)obj_buf,
 7630				 (unsigned long)obj_buf_sz);
 7631			obj_name = tmp_name;
 7632		}
 7633		path = obj_name;
 7634		pr_debug("loading object '%s' from buffer\n", obj_name);
 7635	}
 7636
 7637	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
 7638	if (IS_ERR(obj))
 7639		return obj;
 7640
 7641	kconfig = OPTS_GET(opts, kconfig, NULL);
 7642	if (kconfig) {
 7643		obj->kconfig = strdup(kconfig);
 7644		if (!obj->kconfig) {
 7645			err = -ENOMEM;
 7646			goto out;
 7647		}
 7648	}
 7649
 7650	err = bpf_object__elf_init(obj);
 7651	err = err ? : bpf_object__check_endianness(obj);
 7652	err = err ? : bpf_object__elf_collect(obj);
 7653	err = err ? : bpf_object__collect_externs(obj);
 7654	err = err ? : bpf_object__finalize_btf(obj);
 7655	err = err ? : bpf_object__init_maps(obj, opts);
 7656	err = err ? : bpf_object__collect_relos(obj);
 7657	if (err)
 7658		goto out;
 7659	bpf_object__elf_finish(obj);
 7660
 7661	bpf_object__for_each_program(prog, obj) {
 7662		prog->sec_def = find_sec_def(prog->sec_name);
 7663		if (!prog->sec_def) {
 7664			/* couldn't guess, but user might manually specify */
 7665			pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
 7666				prog->name, prog->sec_name);
 7667			continue;
 7668		}
 7669
 7670		if (prog->sec_def->is_sleepable)
 7671			prog->prog_flags |= BPF_F_SLEEPABLE;
 7672		bpf_program__set_type(prog, prog->sec_def->prog_type);
 7673		bpf_program__set_expected_attach_type(prog,
 7674				prog->sec_def->expected_attach_type);
 7675
 7676		if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
 7677		    prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
 7678			prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
 7679	}
 7680
 7681	return obj;
 7682out:
 7683	bpf_object__close(obj);
 7684	return ERR_PTR(err);
 7685}
 7686
 7687static struct bpf_object *
 7688__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
 7689{
 7690	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 7691		.relaxed_maps = flags & MAPS_RELAX_COMPAT,
 7692	);
 7693
 7694	/* param validation */
 7695	if (!attr->file)
 7696		return NULL;
 7697
 7698	pr_debug("loading %s\n", attr->file);
 7699	return __bpf_object__open(attr->file, NULL, 0, &opts);
 7700}
 7701
 7702struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
 7703{
 7704	return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
 7705}
 7706
 7707struct bpf_object *bpf_object__open(const char *path)
 7708{
 7709	struct bpf_object_open_attr attr = {
 7710		.file		= path,
 7711		.prog_type	= BPF_PROG_TYPE_UNSPEC,
 7712	};
 7713
 7714	return libbpf_ptr(__bpf_object__open_xattr(&attr, 0));
 7715}
 7716
 7717struct bpf_object *
 7718bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
 7719{
 7720	if (!path)
 7721		return libbpf_err_ptr(-EINVAL);
 7722
 7723	pr_debug("loading %s\n", path);
 7724
 7725	return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts));
 7726}
 7727
 7728struct bpf_object *
 7729bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
 7730		     const struct bpf_object_open_opts *opts)
 7731{
 7732	if (!obj_buf || obj_buf_sz == 0)
 7733		return libbpf_err_ptr(-EINVAL);
 7734
 7735	return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts));
 7736}
 7737
 7738struct bpf_object *
 7739bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
 7740			const char *name)
 7741{
 7742	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 7743		.object_name = name,
 7744		/* wrong default, but backwards-compatible */
 7745		.relaxed_maps = true,
 7746	);
 7747
 7748	/* returning NULL is wrong, but backwards-compatible */
 7749	if (!obj_buf || obj_buf_sz == 0)
 7750		return errno = EINVAL, NULL;
 7751
 7752	return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts));
 7753}
 7754
 7755int bpf_object__unload(struct bpf_object *obj)
 7756{
 7757	size_t i;
 7758
 7759	if (!obj)
 7760		return libbpf_err(-EINVAL);
 7761
 7762	for (i = 0; i < obj->nr_maps; i++) {
 7763		zclose(obj->maps[i].fd);
 7764		if (obj->maps[i].st_ops)
 7765			zfree(&obj->maps[i].st_ops->kern_vdata);
 7766	}
 7767
 7768	for (i = 0; i < obj->nr_programs; i++)
 7769		bpf_program__unload(&obj->programs[i]);
 7770
 7771	return 0;
 7772}
 7773
 7774static int bpf_object__sanitize_maps(struct bpf_object *obj)
 7775{
 7776	struct bpf_map *m;
 7777
 7778	bpf_object__for_each_map(m, obj) {
 7779		if (!bpf_map__is_internal(m))
 7780			continue;
 7781		if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
 7782			pr_warn("kernel doesn't support global data\n");
 7783			return -ENOTSUP;
 7784		}
 7785		if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
 7786			m->def.map_flags ^= BPF_F_MMAPABLE;
 7787	}
 7788
 7789	return 0;
 7790}
 7791
 7792static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
 7793{
 7794	char sym_type, sym_name[500];
 7795	unsigned long long sym_addr;
 7796	const struct btf_type *t;
 7797	struct extern_desc *ext;
 7798	int ret, err = 0;
 7799	FILE *f;
 7800
 7801	f = fopen("/proc/kallsyms", "r");
 7802	if (!f) {
 7803		err = -errno;
 7804		pr_warn("failed to open /proc/kallsyms: %d\n", err);
 7805		return err;
 7806	}
 7807
 7808	while (true) {
 7809		ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
 7810			     &sym_addr, &sym_type, sym_name);
 7811		if (ret == EOF && feof(f))
 7812			break;
 7813		if (ret != 3) {
 7814			pr_warn("failed to read kallsyms entry: %d\n", ret);
 7815			err = -EINVAL;
 7816			goto out;
 7817		}
 7818
 7819		ext = find_extern_by_name(obj, sym_name);
 7820		if (!ext || ext->type != EXT_KSYM)
 7821			continue;
 7822
 7823		t = btf__type_by_id(obj->btf, ext->btf_id);
 7824		if (!btf_is_var(t))
 7825			continue;
 7826
 7827		if (ext->is_set && ext->ksym.addr != sym_addr) {
 7828			pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
 7829				sym_name, ext->ksym.addr, sym_addr);
 7830			err = -EINVAL;
 7831			goto out;
 7832		}
 7833		if (!ext->is_set) {
 7834			ext->is_set = true;
 7835			ext->ksym.addr = sym_addr;
 7836			pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
 7837		}
 7838	}
 7839
 7840out:
 7841	fclose(f);
 7842	return err;
 7843}
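/* A self-contained sketch of the parsing above, applied to one
 * illustrative /proc/kallsyms line (address, type character, symbol
 * name; any trailing "[module]" annotation is eaten by %*[^\n]):
 *
 *	unsigned long long addr;
 *	char type, name[500];
 *	int n = sscanf("ffffffff81000000 T _stext",
 *		       "%llx %c %499s%*[^\n]", &addr, &type, name);
 *	// n == 3, addr == 0xffffffff81000000ULL, type == 'T',
 *	// name == "_stext"
 */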
 7844
 7845static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
 7846			    __u16 kind, struct btf **res_btf,
 7847			    int *res_btf_fd)
 7848{
 7849	int i, id, btf_fd, err;
 7850	struct btf *btf;
 7851
 7852	btf = obj->btf_vmlinux;
 7853	btf_fd = 0;
 7854	id = btf__find_by_name_kind(btf, ksym_name, kind);
 7855
 7856	if (id == -ENOENT) {
 7857		err = load_module_btfs(obj);
 7858		if (err)
 7859			return err;
 7860
 7861		for (i = 0; i < obj->btf_module_cnt; i++) {
 7862			btf = obj->btf_modules[i].btf;
 7863			/* we assume module BTF FD is always >0 */
 7864			btf_fd = obj->btf_modules[i].fd;
 7865			id = btf__find_by_name_kind(btf, ksym_name, kind);
 7866			if (id != -ENOENT)
 7867				break;
 7868		}
 7869	}
 7870	if (id <= 0) {
 7871		pr_warn("extern (%s ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
 7872			__btf_kind_str(kind), ksym_name);
 7873		return -ESRCH;
 7874	}
 7875
 7876	*res_btf = btf;
 7877	*res_btf_fd = btf_fd;
 7878	return id;
 7879}
 7880
 7881static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
 7882					       struct extern_desc *ext)
 7883{
 7884	const struct btf_type *targ_var, *targ_type;
 7885	__u32 targ_type_id, local_type_id;
 7886	const char *targ_var_name;
 7887	int id, btf_fd = 0, err;
 7888	struct btf *btf = NULL;
 7889
 7890	id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd);
 7891	if (id < 0)
 7892		return id;
 7893
 7894	/* find local type_id */
 7895	local_type_id = ext->ksym.type_id;
 7896
 7897	/* find target type_id */
 7898	targ_var = btf__type_by_id(btf, id);
 7899	targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
 7900	targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
 7901
 7902	err = bpf_core_types_are_compat(obj->btf, local_type_id,
 7903					btf, targ_type_id);
 7904	if (err <= 0) {
 7905		const struct btf_type *local_type;
 7906		const char *targ_name, *local_name;
 7907
 7908		local_type = btf__type_by_id(obj->btf, local_type_id);
 7909		local_name = btf__name_by_offset(obj->btf, local_type->name_off);
 7910		targ_name = btf__name_by_offset(btf, targ_type->name_off);
 7911
 7912		pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
 7913			ext->name, local_type_id,
 7914			btf_kind_str(local_type), local_name, targ_type_id,
 7915			btf_kind_str(targ_type), targ_name);
 7916		return -EINVAL;
 7917	}
 7918
 7919	ext->is_set = true;
 7920	ext->ksym.kernel_btf_obj_fd = btf_fd;
 7921	ext->ksym.kernel_btf_id = id;
 7922	pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
 7923		 ext->name, id, btf_kind_str(targ_var), targ_var_name);
 7924
 7925	return 0;
 7926}
 7927
 7928static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
 7929						struct extern_desc *ext)
 7930{
 7931	int local_func_proto_id, kfunc_proto_id, kfunc_id;
 7932	const struct btf_type *kern_func;
 7933	struct btf *kern_btf = NULL;
 7934	int ret, kern_btf_fd = 0;
 7935
 7936	local_func_proto_id = ext->ksym.type_id;
 7937
 7938	kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC,
 7939				    &kern_btf, &kern_btf_fd);
 7940	if (kfunc_id < 0) {
 7941		pr_warn("extern (func ksym) '%s': not found in kernel BTF\n",
 7942			ext->name);
 7943		return kfunc_id;
 7944	}
 7945
 7946	if (kern_btf != obj->btf_vmlinux) {
 7947		pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
 7948			ext->name);
 7949		return -ENOTSUP;
 7950	}
 7951
 7952	kern_func = btf__type_by_id(kern_btf, kfunc_id);
 7953	kfunc_proto_id = kern_func->type;
 7954
 7955	ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
 7956					kern_btf, kfunc_proto_id);
 7957	if (ret <= 0) {
 7958		pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
 7959			ext->name, local_func_proto_id, kfunc_proto_id);
 7960		return -EINVAL;
 7961	}
 7962
 7963	ext->is_set = true;
 7964	ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
 7965	ext->ksym.kernel_btf_id = kfunc_id;
 7966	pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
 7967		 ext->name, kfunc_id);
 7968
 7969	return 0;
 7970}
 7971
 7972static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
 7973{
 7974	const struct btf_type *t;
 7975	struct extern_desc *ext;
 7976	int i, err;
 7977
 7978	for (i = 0; i < obj->nr_extern; i++) {
 7979		ext = &obj->externs[i];
 7980		if (ext->type != EXT_KSYM || !ext->ksym.type_id)
 7981			continue;
 7982
 7983		if (obj->gen_loader) {
 7984			ext->is_set = true;
 7985			ext->ksym.kernel_btf_obj_fd = 0;
 7986			ext->ksym.kernel_btf_id = 0;
 7987			continue;
 7988		}
 7989		t = btf__type_by_id(obj->btf, ext->btf_id);
 7990		if (btf_is_var(t))
 7991			err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
 7992		else
 7993			err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
 7994		if (err)
 7995			return err;
 7996	}
 7997	return 0;
 7998}
 7999
 8000static int bpf_object__resolve_externs(struct bpf_object *obj,
 8001				       const char *extra_kconfig)
 8002{
 8003	bool need_config = false, need_kallsyms = false;
 8004	bool need_vmlinux_btf = false;
 8005	struct extern_desc *ext;
 8006	void *kcfg_data = NULL;
 8007	int err, i;
 8008
 8009	if (obj->nr_extern == 0)
 8010		return 0;
 8011
 8012	if (obj->kconfig_map_idx >= 0)
 8013		kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
 8014
 8015	for (i = 0; i < obj->nr_extern; i++) {
 8016		ext = &obj->externs[i];
 8017
 8018		if (ext->type == EXT_KCFG &&
 8019		    strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
 8020			void *ext_val = kcfg_data + ext->kcfg.data_off;
 8021			__u32 kver = get_kernel_version();
 8022
 8023			if (!kver) {
 8024				pr_warn("failed to get kernel version\n");
 8025				return -EINVAL;
 8026			}
 8027			err = set_kcfg_value_num(ext, ext_val, kver);
 8028			if (err)
 8029				return err;
 8030			pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
 8031		} else if (ext->type == EXT_KCFG &&
 8032			   strncmp(ext->name, "CONFIG_", 7) == 0) {
 8033			need_config = true;
 8034		} else if (ext->type == EXT_KSYM) {
 8035			if (ext->ksym.type_id)
 8036				need_vmlinux_btf = true;
 8037			else
 8038				need_kallsyms = true;
 8039		} else {
 8040			pr_warn("unrecognized extern '%s'\n", ext->name);
 8041			return -EINVAL;
 8042		}
 8043	}
 8044	if (need_config && extra_kconfig) {
 8045		err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
 8046		if (err)
 8047			return -EINVAL;
 8048		need_config = false;
 8049		for (i = 0; i < obj->nr_extern; i++) {
 8050			ext = &obj->externs[i];
 8051			if (ext->type == EXT_KCFG && !ext->is_set) {
 8052				need_config = true;
 8053				break;
 8054			}
 8055		}
 8056	}
 8057	if (need_config) {
 8058		err = bpf_object__read_kconfig_file(obj, kcfg_data);
 8059		if (err)
 8060			return -EINVAL;
 8061	}
 8062	if (need_kallsyms) {
 8063		err = bpf_object__read_kallsyms_file(obj);
 8064		if (err)
 8065			return -EINVAL;
 8066	}
 8067	if (need_vmlinux_btf) {
 8068		err = bpf_object__resolve_ksyms_btf_id(obj);
 8069		if (err)
 8070			return -EINVAL;
 8071	}
 8072	for (i = 0; i < obj->nr_extern; i++) {
 8073		ext = &obj->externs[i];
 8074
 8075		if (!ext->is_set && !ext->is_weak) {
 8076			pr_warn("extern %s (strong) not resolved\n", ext->name);
 8077			return -ESRCH;
 8078		} else if (!ext->is_set) {
 8079			pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
 8080				 ext->name);
 8081		}
 8082	}
 8083
 8084	return 0;
 8085}
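/* For context, what these externs look like on the BPF C side (a hedged
 * sketch; my_kernel_sym is a made-up name, __kconfig/__ksym come from
 * bpf_helpers.h):
 *
 *	extern int LINUX_KERNEL_VERSION __kconfig;	// EXT_KCFG
 *	extern int CONFIG_HZ __kconfig __weak;		// EXT_KCFG, weak
 *	extern const void my_kernel_sym __ksym;		// EXT_KSYM
 *
 * Strong externs must resolve or the load fails with -ESRCH; weak ones
 * are left as zero, matching the final loop above.
 */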
 8086
 8087int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 8088{
 8089	struct bpf_object *obj;
 8090	int err, i;
 8091
 8092	if (!attr)
 8093		return libbpf_err(-EINVAL);
 8094	obj = attr->obj;
 8095	if (!obj)
 8096		return libbpf_err(-EINVAL);
 8097
 8098	if (obj->loaded) {
 8099		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
 8100		return libbpf_err(-EINVAL);
 8101	}
 8102
 8103	if (obj->gen_loader)
 8104		bpf_gen__init(obj->gen_loader, attr->log_level);
 8105
 8106	err = bpf_object__probe_loading(obj);
 8107	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
 8108	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
 8109	err = err ? : bpf_object__sanitize_and_load_btf(obj);
 8110	err = err ? : bpf_object__sanitize_maps(obj);
 8111	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
 8112	err = err ? : bpf_object__create_maps(obj);
 8113	err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
 8114	err = err ? : bpf_object__load_progs(obj, attr->log_level);
 8115
 8116	if (obj->gen_loader) {
 8117		/* reset FDs */
 8118		if (obj->btf)
 8119			btf__set_fd(obj->btf, -1);
 8120		for (i = 0; i < obj->nr_maps; i++)
 8121			obj->maps[i].fd = -1;
 8122		if (!err)
 8123			err = bpf_gen__finish(obj->gen_loader);
 8124	}
 8125
 8126	/* clean up module BTFs */
 8127	for (i = 0; i < obj->btf_module_cnt; i++) {
 8128		close(obj->btf_modules[i].fd);
 8129		btf__free(obj->btf_modules[i].btf);
 8130		free(obj->btf_modules[i].name);
 8131	}
 8132	free(obj->btf_modules);
 8133
 8134	/* clean up vmlinux BTF */
 8135	btf__free(obj->btf_vmlinux);
 8136	obj->btf_vmlinux = NULL;
 8137
 8138	obj->loaded = true; /* doesn't matter whether it succeeded or not */
 8139
 8140	if (err)
 8141		goto out;
 8142
 8143	return 0;
 8144out:
 8145	/* unpin any maps that were auto-pinned during load */
 8146	for (i = 0; i < obj->nr_maps; i++)
 8147		if (obj->maps[i].pinned && !obj->maps[i].reused)
 8148			bpf_map__unpin(&obj->maps[i], NULL);
 8149
 8150	bpf_object__unload(obj);
 8151	pr_warn("failed to load object '%s'\n", obj->path);
 8152	return libbpf_err(err);
 8153}
 8154
 8155int bpf_object__load(struct bpf_object *obj)
 8156{
 8157	struct bpf_object_load_attr attr = {
 8158		.obj = obj,
 8159	};
 8160
 8161	return bpf_object__load_xattr(&attr);
 8162}
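
/*
 * Editor's usage sketch (hypothetical example, not part of libbpf): the
 * typical open -> load -> close flow. The file name "prog.bpf.o" is an
 * assumption for illustration.
 *
 *	struct bpf_object *obj;
 *	int err;
 *
 *	obj = bpf_object__open("prog.bpf.o");
 *	err = libbpf_get_error(obj);
 *	if (err)
 *		return err;
 *	err = bpf_object__load(obj);	// probes, creates maps, resolves
 *	if (err) {			// externs, relocates, loads progs
 *		bpf_object__close(obj);
 *		return err;
 *	}
 */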
 8163
 8164static int make_parent_dir(const char *path)
 8165{
 8166	char *cp, errmsg[STRERR_BUFSIZE];
 8167	char *dname, *dir;
 8168	int err = 0;
 8169
 8170	dname = strdup(path);
 8171	if (dname == NULL)
 8172		return -ENOMEM;
 8173
 8174	dir = dirname(dname);
 8175	if (mkdir(dir, 0700) && errno != EEXIST)
 8176		err = -errno;
 8177
 8178	free(dname);
 8179	if (err) {
 8180		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
 8181		pr_warn("failed to mkdir %s: %s\n", path, cp);
 8182	}
 8183	return err;
 8184}
 8185
 8186static int check_path(const char *path)
 8187{
 8188	char *cp, errmsg[STRERR_BUFSIZE];
 8189	struct statfs st_fs;
 8190	char *dname, *dir;
 8191	int err = 0;
 8192
 8193	if (path == NULL)
 8194		return -EINVAL;
 8195
 8196	dname = strdup(path);
 8197	if (dname == NULL)
 8198		return -ENOMEM;
 8199
 8200	dir = dirname(dname);
 8201	if (statfs(dir, &st_fs)) {
 8202		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
 8203		pr_warn("failed to statfs %s: %s\n", dir, cp);
 8204		err = -errno;
 8205	}
 8206	free(dname);
 8207
 8208	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
 8209		pr_warn("specified path %s is not on BPF FS\n", path);
 8210		err = -EINVAL;
 8211	}
 8212
 8213	return err;
 8214}
 8215
 8216int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
 8217			      int instance)
 8218{
 8219	char *cp, errmsg[STRERR_BUFSIZE];
 8220	int err;
 8221
 8222	err = make_parent_dir(path);
 8223	if (err)
 8224		return libbpf_err(err);
 8225
 8226	err = check_path(path);
 8227	if (err)
 8228		return libbpf_err(err);
 8229
 8230	if (prog == NULL) {
 8231		pr_warn("invalid program pointer\n");
 8232		return libbpf_err(-EINVAL);
 8233	}
 8234
 8235	if (instance < 0 || instance >= prog->instances.nr) {
 8236		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
 8237			instance, prog->name, prog->instances.nr);
 8238		return libbpf_err(-EINVAL);
 8239	}
 8240
 8241	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
 8242		err = -errno;
 8243		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
 8244		pr_warn("failed to pin program: %s\n", cp);
 8245		return libbpf_err(err);
 8246	}
 8247	pr_debug("pinned program '%s'\n", path);
 8248
 8249	return 0;
 8250}
 8251
 8252int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
 8253				int instance)
 8254{
 8255	int err;
 8256
 8257	err = check_path(path);
 8258	if (err)
 8259		return libbpf_err(err);
 8260
 8261	if (prog == NULL) {
 8262		pr_warn("invalid program pointer\n");
 8263		return libbpf_err(-EINVAL);
 8264	}
 8265
 8266	if (instance < 0 || instance >= prog->instances.nr) {
 8267		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
 8268			instance, prog->name, prog->instances.nr);
 8269		return libbpf_err(-EINVAL);
 8270	}
 8271
 8272	err = unlink(path);
 8273	if (err != 0)
 8274		return libbpf_err(-errno);
 8275
 8276	pr_debug("unpinned program '%s'\n", path);
 8277
 8278	return 0;
 8279}
 8280
 8281int bpf_program__pin(struct bpf_program *prog, const char *path)
 8282{
 8283	int i, err;
 8284
 8285	err = make_parent_dir(path);
 8286	if (err)
 8287		return libbpf_err(err);
 8288
 8289	err = check_path(path);
 8290	if (err)
 8291		return libbpf_err(err);
 8292
 8293	if (prog == NULL) {
 8294		pr_warn("invalid program pointer\n");
 8295		return libbpf_err(-EINVAL);
 8296	}
 8297
 8298	if (prog->instances.nr <= 0) {
 8299		pr_warn("no instances of prog %s to pin\n", prog->name);
 8300		return libbpf_err(-EINVAL);
 8301	}
 8302
 8303	if (prog->instances.nr == 1) {
 8304		/* don't create subdirs when pinning single instance */
 8305		return bpf_program__pin_instance(prog, path, 0);
 8306	}
 8307
 8308	for (i = 0; i < prog->instances.nr; i++) {
 8309		char buf[PATH_MAX];
 8310		int len;
 8311
 8312		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
 8313		if (len < 0) {
 8314			err = -EINVAL;
 8315			goto err_unpin;
 8316		} else if (len >= PATH_MAX) {
 8317			err = -ENAMETOOLONG;
 8318			goto err_unpin;
 8319		}
 8320
 8321		err = bpf_program__pin_instance(prog, buf, i);
 8322		if (err)
 8323			goto err_unpin;
 8324	}
 8325
 8326	return 0;
 8327
 8328err_unpin:
 8329	for (i = i - 1; i >= 0; i--) {
 8330		char buf[PATH_MAX];
 8331		int len;
 8332
 8333		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
 8334		if (len < 0)
 8335			continue;
 8336		else if (len >= PATH_MAX)
 8337			continue;
 8338
 8339		bpf_program__unpin_instance(prog, buf, i);
 8340	}
 8341
 8342	rmdir(path);
 8343
 8344	return libbpf_err(err);
 8345}
 8346
 8347int bpf_program__unpin(struct bpf_program *prog, const char *path)
 8348{
 8349	int i, err;
 8350
 8351	err = check_path(path);
 8352	if (err)
 8353		return libbpf_err(err);
 8354
 8355	if (prog == NULL) {
 8356		pr_warn("invalid program pointer\n");
 8357		return libbpf_err(-EINVAL);
 8358	}
 8359
 8360	if (prog->instances.nr <= 0) {
 8361		pr_warn("no instances of prog %s to unpin\n", prog->name);
 8362		return libbpf_err(-EINVAL);
 8363	}
 8364
 8365	if (prog->instances.nr == 1) {
 8366		/* don't create subdirs when pinning single instance */
 8367		return bpf_program__unpin_instance(prog, path, 0);
 8368	}
 8369
 8370	for (i = 0; i < prog->instances.nr; i++) {
 8371		char buf[PATH_MAX];
 8372		int len;
 8373
 8374		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
 8375		if (len < 0)
 8376			return libbpf_err(-EINVAL);
 8377		else if (len >= PATH_MAX)
 8378			return libbpf_err(-ENAMETOOLONG);
 8379
 8380		err = bpf_program__unpin_instance(prog, buf, i);
 8381		if (err)
 8382			return err;
 8383	}
 8384
 8385	err = rmdir(path);
 8386	if (err)
 8387		return libbpf_err(-errno);
 8388
 8389	return 0;
 8390}
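
/*
 * Editor's usage sketch (hypothetical): pin a loaded program to bpffs and
 * remove the pin later. Assumes bpffs is mounted at /sys/fs/bpf.
 *
 *	const char *path = "/sys/fs/bpf/my_prog";
 *
 *	if (bpf_program__pin(prog, path))	// single-instance programs
 *		return -1;			// are pinned directly at path
 *	...
 *	if (bpf_program__unpin(prog, path))	// unlinks the pinned file
 *		return -1;
 */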
 8391
 8392int bpf_map__pin(struct bpf_map *map, const char *path)
 8393{
 8394	char *cp, errmsg[STRERR_BUFSIZE];
 8395	int err;
 8396
 8397	if (map == NULL) {
 8398		pr_warn("invalid map pointer\n");
 8399		return libbpf_err(-EINVAL);
 8400	}
 8401
 8402	if (map->pin_path) {
 8403		if (path && strcmp(path, map->pin_path)) {
 8404			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
 8405				bpf_map__name(map), map->pin_path, path);
 8406			return libbpf_err(-EINVAL);
 8407		} else if (map->pinned) {
 8408			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
 8409				 bpf_map__name(map), map->pin_path);
 8410			return 0;
 8411		}
 8412	} else {
 8413		if (!path) {
 8414			pr_warn("missing a path to pin map '%s' at\n",
 8415				bpf_map__name(map));
 8416			return libbpf_err(-EINVAL);
 8417		} else if (map->pinned) {
 8418			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
 8419			return libbpf_err(-EEXIST);
 8420		}
 8421
 8422		map->pin_path = strdup(path);
 8423		if (!map->pin_path) {
 8424			err = -errno;
 8425			goto out_err;
 8426		}
 8427	}
 8428
 8429	err = make_parent_dir(map->pin_path);
 8430	if (err)
 8431		return libbpf_err(err);
 8432
 8433	err = check_path(map->pin_path);
 8434	if (err)
 8435		return libbpf_err(err);
 8436
 8437	if (bpf_obj_pin(map->fd, map->pin_path)) {
 8438		err = -errno;
 8439		goto out_err;
 8440	}
 8441
 8442	map->pinned = true;
 8443	pr_debug("pinned map '%s'\n", map->pin_path);
 8444
 8445	return 0;
 8446
 8447out_err:
 8448	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
 8449	pr_warn("failed to pin map: %s\n", cp);
 8450	return libbpf_err(err);
 8451}
 8452
 8453int bpf_map__unpin(struct bpf_map *map, const char *path)
 8454{
 8455	int err;
 8456
 8457	if (map == NULL) {
 8458		pr_warn("invalid map pointer\n");
 8459		return libbpf_err(-EINVAL);
 8460	}
 8461
 8462	if (map->pin_path) {
 8463		if (path && strcmp(path, map->pin_path)) {
 8464			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
 8465				bpf_map__name(map), map->pin_path, path);
 8466			return libbpf_err(-EINVAL);
 8467		}
 8468		path = map->pin_path;
 8469	} else if (!path) {
 8470		pr_warn("no path to unpin map '%s' from\n",
 8471			bpf_map__name(map));
 8472		return libbpf_err(-EINVAL);
 8473	}
 8474
 8475	err = check_path(path);
 8476	if (err)
 8477		return libbpf_err(err);
 8478
 8479	err = unlink(path);
 8480	if (err != 0)
 8481		return libbpf_err(-errno);
 8482
 8483	map->pinned = false;
 8484	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
 8485
 8486	return 0;
 8487}
 8488
 8489int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
 8490{
 8491	char *new = NULL;
 8492
 8493	if (path) {
 8494		new = strdup(path);
 8495		if (!new)
 8496			return libbpf_err(-errno);
 8497	}
 8498
 8499	free(map->pin_path);
 8500	map->pin_path = new;
 8501	return 0;
 8502}
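
/*
 * Editor's usage sketch (hypothetical): set a map's pin path before
 * bpf_object__load(); the map is then auto-pinned during load, and a
 * failed load unpins it again (see bpf_object__load_xattr() above).
 * The map name "my_map" is an assumption for illustration.
 *
 *	struct bpf_map *map;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (!map || bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map"))
 *		return -1;
 */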
 8503
 8504const char *bpf_map__get_pin_path(const struct bpf_map *map)
 8505{
 8506	return map->pin_path;
 8507}
 8508
 8509bool bpf_map__is_pinned(const struct bpf_map *map)
 8510{
 8511	return map->pinned;
 8512}
 8513
 8514static void sanitize_pin_path(char *s)
 8515{
 8516	/* bpffs disallows periods in path names */
 8517	while (*s) {
 8518		if (*s == '.')
 8519			*s = '_';
 8520		s++;
 8521	}
 8522}
 8523
 8524int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
 8525{
 8526	struct bpf_map *map;
 8527	int err;
 8528
 8529	if (!obj)
 8530		return libbpf_err(-ENOENT);
 8531
 8532	if (!obj->loaded) {
 8533		pr_warn("object not yet loaded; load it first\n");
 8534		return libbpf_err(-ENOENT);
 8535	}
 8536
 8537	bpf_object__for_each_map(map, obj) {
 8538		char *pin_path = NULL;
 8539		char buf[PATH_MAX];
 8540
 8541		if (path) {
 8542			int len;
 8543
 8544			len = snprintf(buf, PATH_MAX, "%s/%s", path,
 8545				       bpf_map__name(map));
 8546			if (len < 0) {
 8547				err = -EINVAL;
 8548				goto err_unpin_maps;
 8549			} else if (len >= PATH_MAX) {
 8550				err = -ENAMETOOLONG;
 8551				goto err_unpin_maps;
 8552			}
 8553			sanitize_pin_path(buf);
 8554			pin_path = buf;
 8555		} else if (!map->pin_path) {
 8556			continue;
 8557		}
 8558
 8559		err = bpf_map__pin(map, pin_path);
 8560		if (err)
 8561			goto err_unpin_maps;
 8562	}
 8563
 8564	return 0;
 8565
 8566err_unpin_maps:
 8567	while ((map = bpf_map__prev(map, obj))) {
 8568		if (!map->pin_path)
 8569			continue;
 8570
 8571		bpf_map__unpin(map, NULL);
 8572	}
 8573
 8574	return libbpf_err(err);
 8575}
 8576
 8577int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
 8578{
 8579	struct bpf_map *map;
 8580	int err;
 8581
 8582	if (!obj)
 8583		return libbpf_err(-ENOENT);
 8584
 8585	bpf_object__for_each_map(map, obj) {
 8586		char *pin_path = NULL;
 8587		char buf[PATH_MAX];
 8588
 8589		if (path) {
 8590			int len;
 8591
 8592			len = snprintf(buf, PATH_MAX, "%s/%s", path,
 8593				       bpf_map__name(map));
 8594			if (len < 0)
 8595				return libbpf_err(-EINVAL);
 8596			else if (len >= PATH_MAX)
 8597				return libbpf_err(-ENAMETOOLONG);
 8598			sanitize_pin_path(buf);
 8599			pin_path = buf;
 8600		} else if (!map->pin_path) {
 8601			continue;
 8602		}
 8603
 8604		err = bpf_map__unpin(map, pin_path);
 8605		if (err)
 8606			return libbpf_err(err);
 8607	}
 8608
 8609	return 0;
 8610}
 8611
 8612int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
 8613{
 8614	struct bpf_program *prog;
 8615	int err;
 8616
 8617	if (!obj)
 8618		return libbpf_err(-ENOENT);
 8619
 8620	if (!obj->loaded) {
 8621		pr_warn("object not yet loaded; load it first\n");
 8622		return libbpf_err(-ENOENT);
 8623	}
 8624
 8625	bpf_object__for_each_program(prog, obj) {
 8626		char buf[PATH_MAX];
 8627		int len;
 8628
 8629		len = snprintf(buf, PATH_MAX, "%s/%s", path,
 8630			       prog->pin_name);
 8631		if (len < 0) {
 8632			err = -EINVAL;
 8633			goto err_unpin_programs;
 8634		} else if (len >= PATH_MAX) {
 8635			err = -ENAMETOOLONG;
 8636			goto err_unpin_programs;
 8637		}
 8638
 8639		err = bpf_program__pin(prog, buf);
 8640		if (err)
 8641			goto err_unpin_programs;
 8642	}
 8643
 8644	return 0;
 8645
 8646err_unpin_programs:
 8647	while ((prog = bpf_program__prev(prog, obj))) {
 8648		char buf[PATH_MAX];
 8649		int len;
 8650
 8651		len = snprintf(buf, PATH_MAX, "%s/%s", path,
 8652			       prog->pin_name);
 8653		if (len < 0)
 8654			continue;
 8655		else if (len >= PATH_MAX)
 8656			continue;
 8657
 8658		bpf_program__unpin(prog, buf);
 8659	}
 8660
 8661	return libbpf_err(err);
 8662}
 8663
 8664int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
 8665{
 8666	struct bpf_program *prog;
 8667	int err;
 8668
 8669	if (!obj)
 8670		return libbpf_err(-ENOENT);
 8671
 8672	bpf_object__for_each_program(prog, obj) {
 8673		char buf[PATH_MAX];
 8674		int len;
 8675
 8676		len = snprintf(buf, PATH_MAX, "%s/%s", path,
 8677			       prog->pin_name);
 8678		if (len < 0)
 8679			return libbpf_err(-EINVAL);
 8680		else if (len >= PATH_MAX)
 8681			return libbpf_err(-ENAMETOOLONG);
 8682
 8683		err = bpf_program__unpin(prog, buf);
 8684		if (err)
 8685			return libbpf_err(err);
 8686	}
 8687
 8688	return 0;
 8689}
 8690
 8691int bpf_object__pin(struct bpf_object *obj, const char *path)
 8692{
 8693	int err;
 8694
 8695	err = bpf_object__pin_maps(obj, path);
 8696	if (err)
 8697		return libbpf_err(err);
 8698
 8699	err = bpf_object__pin_programs(obj, path);
 8700	if (err) {
 8701		bpf_object__unpin_maps(obj, path);
 8702		return libbpf_err(err);
 8703	}
 8704
 8705	return 0;
 8706}
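
/*
 * Editor's usage sketch (hypothetical): pin all maps and programs of a
 * loaded object under one bpffs directory, as "<path>/<name>" entries.
 * On failure, bpf_object__pin() rolls the map pins back itself.
 *
 *	if (bpf_object__pin(obj, "/sys/fs/bpf/my_obj"))
 *		return -1;
 */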
 8707
 8708static void bpf_map__destroy(struct bpf_map *map)
 8709{
 8710	if (map->clear_priv)
 8711		map->clear_priv(map, map->priv);
 8712	map->priv = NULL;
 8713	map->clear_priv = NULL;
 8714
 8715	if (map->inner_map) {
 8716		bpf_map__destroy(map->inner_map);
 8717		zfree(&map->inner_map);
 8718	}
 8719
 8720	zfree(&map->init_slots);
 8721	map->init_slots_sz = 0;
 8722
 8723	if (map->mmaped) {
 8724		munmap(map->mmaped, bpf_map_mmap_sz(map));
 8725		map->mmaped = NULL;
 8726	}
 8727
 8728	if (map->st_ops) {
 8729		zfree(&map->st_ops->data);
 8730		zfree(&map->st_ops->progs);
 8731		zfree(&map->st_ops->kern_func_off);
 8732		zfree(&map->st_ops);
 8733	}
 8734
 8735	zfree(&map->name);
 8736	zfree(&map->pin_path);
 8737
 8738	if (map->fd >= 0)
 8739		zclose(map->fd);
 8740}
 8741
 8742void bpf_object__close(struct bpf_object *obj)
 8743{
 8744	size_t i;
 8745
 8746	if (IS_ERR_OR_NULL(obj))
 8747		return;
 8748
 8749	if (obj->clear_priv)
 8750		obj->clear_priv(obj, obj->priv);
 8751
 8752	bpf_gen__free(obj->gen_loader);
 8753	bpf_object__elf_finish(obj);
 8754	bpf_object__unload(obj);
 8755	btf__free(obj->btf);
 8756	btf_ext__free(obj->btf_ext);
 8757
 8758	for (i = 0; i < obj->nr_maps; i++)
 8759		bpf_map__destroy(&obj->maps[i]);
 8760
 8761	zfree(&obj->kconfig);
 8762	zfree(&obj->externs);
 8763	obj->nr_extern = 0;
 8764
 8765	zfree(&obj->maps);
 8766	obj->nr_maps = 0;
 8767
 8768	if (obj->programs && obj->nr_programs) {
 8769		for (i = 0; i < obj->nr_programs; i++)
 8770			bpf_program__exit(&obj->programs[i]);
 8771	}
 8772	zfree(&obj->programs);
 8773
 8774	list_del(&obj->list);
 8775	free(obj);
 8776}
 8777
 8778struct bpf_object *
 8779bpf_object__next(struct bpf_object *prev)
 8780{
 8781	struct bpf_object *next;
 8782
 8783	if (!prev)
 8784		next = list_first_entry(&bpf_objects_list,
 8785					struct bpf_object,
 8786					list);
 8787	else
 8788		next = list_next_entry(prev, list);
 8789
 8790	/* An empty list is detected here, so no check is needed on entry. */
 8791	if (&next->list == &bpf_objects_list)
 8792		return NULL;
 8793
 8794	return next;
 8795}
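
/*
 * Editor's usage sketch (hypothetical): walk all objects opened by this
 * process; passing NULL starts at the head of the global list.
 *
 *	struct bpf_object *obj = NULL;
 *
 *	while ((obj = bpf_object__next(obj)) != NULL)
 *		printf("object: %s\n", bpf_object__name(obj));
 */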
 8796
 8797const char *bpf_object__name(const struct bpf_object *obj)
 8798{
 8799	return obj ? obj->name : libbpf_err_ptr(-EINVAL);
 8800}
 8801
 8802unsigned int bpf_object__kversion(const struct bpf_object *obj)
 8803{
 8804	return obj ? obj->kern_version : 0;
 8805}
 8806
 8807struct btf *bpf_object__btf(const struct bpf_object *obj)
 8808{
 8809	return obj ? obj->btf : NULL;
 8810}
 8811
 8812int bpf_object__btf_fd(const struct bpf_object *obj)
 8813{
 8814	return obj->btf ? btf__fd(obj->btf) : -1;
 8815}
 8816
 8817int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
 8818{
 8819	if (obj->loaded)
 8820		return libbpf_err(-EINVAL);
 8821
 8822	obj->kern_version = kern_version;
 8823
 8824	return 0;
 8825}
 8826
 8827int bpf_object__set_priv(struct bpf_object *obj, void *priv,
 8828			 bpf_object_clear_priv_t clear_priv)
 8829{
 8830	if (obj->priv && obj->clear_priv)
 8831		obj->clear_priv(obj, obj->priv);
 8832
 8833	obj->priv = priv;
 8834	obj->clear_priv = clear_priv;
 8835	return 0;
 8836}
 8837
 8838void *bpf_object__priv(const struct bpf_object *obj)
 8839{
 8840	return obj ? obj->priv : libbpf_err_ptr(-EINVAL);
 8841}
 8842
 8843int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
 8844{
 8845	struct bpf_gen *gen;
 8846
 8847	if (!opts)
 8848		return -EFAULT;
 8849	if (!OPTS_VALID(opts, gen_loader_opts))
 8850		return -EINVAL;
 8851	gen = calloc(1, sizeof(*gen));
 8852	if (!gen)
 8853		return -ENOMEM;
 8854	gen->opts = opts;
 8855	obj->gen_loader = gen;
 8856	return 0;
 8857}
 8858
 8859static struct bpf_program *
 8860__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
 8861		    bool forward)
 8862{
 8863	size_t nr_programs = obj->nr_programs;
 8864	ssize_t idx;
 8865
 8866	if (!nr_programs)
 8867		return NULL;
 8868
 8869	if (!p)
 8870		/* Iterate from the beginning */
 8871		return forward ? &obj->programs[0] :
 8872			&obj->programs[nr_programs - 1];
 8873
 8874	if (p->obj != obj) {
 8875		pr_warn("error: program handle doesn't match object\n");
 8876		return errno = EINVAL, NULL;
 8877	}
 8878
 8879	idx = (p - obj->programs) + (forward ? 1 : -1);
 8880	if (idx >= obj->nr_programs || idx < 0)
 8881		return NULL;
 8882	return &obj->programs[idx];
 8883}
 8884
 8885struct bpf_program *
 8886bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
 8887{
 8888	struct bpf_program *prog = prev;
 8889
 8890	do {
 8891		prog = __bpf_program__iter(prog, obj, true);
 8892	} while (prog && prog_is_subprog(obj, prog));
 8893
 8894	return prog;
 8895}
 8896
 8897struct bpf_program *
 8898bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
 8899{
 8900	struct bpf_program *prog = next;
 8901
 8902	do {
 8903		prog = __bpf_program__iter(prog, obj, false);
 8904	} while (prog && prog_is_subprog(obj, prog));
 8905
 8906	return prog;
 8907}
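
/*
 * Editor's usage sketch (hypothetical): enumerate the entry-point
 * programs of an object; sub-programs are skipped by the iterator.
 *
 *	struct bpf_program *prog = NULL;
 *
 *	while ((prog = bpf_program__next(prog, obj)) != NULL)
 *		printf("%s (section %s)\n", bpf_program__name(prog),
 *		       bpf_program__section_name(prog));
 */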
 8908
 8909int bpf_program__set_priv(struct bpf_program *prog, void *priv,
 8910			  bpf_program_clear_priv_t clear_priv)
 8911{
 8912	if (prog->priv && prog->clear_priv)
 8913		prog->clear_priv(prog, prog->priv);
 8914
 8915	prog->priv = priv;
 8916	prog->clear_priv = clear_priv;
 8917	return 0;
 8918}
 8919
 8920void *bpf_program__priv(const struct bpf_program *prog)
 8921{
 8922	return prog ? prog->priv : libbpf_err_ptr(-EINVAL);
 8923}
 8924
 8925void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
 8926{
 8927	prog->prog_ifindex = ifindex;
 8928}
 8929
 8930const char *bpf_program__name(const struct bpf_program *prog)
 8931{
 8932	return prog->name;
 8933}
 8934
 8935const char *bpf_program__section_name(const struct bpf_program *prog)
 8936{
 8937	return prog->sec_name;
 8938}
 8939
 8940const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
 8941{
 8942	const char *title;
 8943
 8944	title = prog->sec_name;
 8945	if (needs_copy) {
 8946		title = strdup(title);
 8947		if (!title) {
 8948			pr_warn("failed to strdup program title\n");
 8949			return libbpf_err_ptr(-ENOMEM);
 8950		}
 8951	}
 8952
 8953	return title;
 8954}
 8955
 8956bool bpf_program__autoload(const struct bpf_program *prog)
 8957{
 8958	return prog->load;
 8959}
 8960
 8961int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
 8962{
 8963	if (prog->obj->loaded)
 8964		return libbpf_err(-EINVAL);
 8965
 8966	prog->load = autoload;
 8967	return 0;
 8968}
 8969
 8970int bpf_program__fd(const struct bpf_program *prog)
 8971{
 8972	return bpf_program__nth_fd(prog, 0);
 8973}
 8974
 8975size_t bpf_program__size(const struct bpf_program *prog)
 8976{
 8977	return prog->insns_cnt * BPF_INSN_SZ;
 8978}
 8979
 8980int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
 8981			  bpf_program_prep_t prep)
 8982{
 8983	int *instances_fds;
 8984
 8985	if (nr_instances <= 0 || !prep)
 8986		return libbpf_err(-EINVAL);
 8987
 8988	if (prog->instances.nr > 0 || prog->instances.fds) {
 8989		pr_warn("Can't set pre-processor after loading\n");
 8990		return libbpf_err(-EINVAL);
 8991	}
 8992
 8993	instances_fds = malloc(sizeof(int) * nr_instances);
 8994	if (!instances_fds) {
 8995		pr_warn("failed to allocate memory for instance fds\n");
 8996		return libbpf_err(-ENOMEM);
 8997	}
 8998
 8999	/* fill all fds with -1 */
 9000	memset(instances_fds, -1, sizeof(int) * nr_instances);
 9001
 9002	prog->instances.nr = nr_instances;
 9003	prog->instances.fds = instances_fds;
 9004	prog->preprocessor = prep;
 9005	return 0;
 9006}
 9007
 9008int bpf_program__nth_fd(const struct bpf_program *prog, int n)
 9009{
 9010	int fd;
 9011
 9012	if (!prog)
 9013		return libbpf_err(-EINVAL);
 9014
 9015	if (n >= prog->instances.nr || n < 0) {
 9016		pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
 9017			n, prog->name, prog->instances.nr);
 9018		return libbpf_err(-EINVAL);
 9019	}
 9020
 9021	fd = prog->instances.fds[n];
 9022	if (fd < 0) {
 9023		pr_warn("%dth instance of program '%s' is invalid\n",
 9024			n, prog->name);
 9025		return libbpf_err(-ENOENT);
 9026	}
 9027
 9028	return fd;
 9029}
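
/*
 * Editor's usage sketch (hypothetical): the multi-instance API. A
 * pre-processor that loads every instance with unmodified instructions;
 * struct bpf_prog_prep_result and bpf_program_prep_t come from libbpf.h.
 *
 *	static int prep_fn(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;	// instructions for instance n
 *		res->new_insn_cnt = insns_cnt;
 *		res->pfd = NULL;		// don't copy out the FD
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 2, prep_fn);	// before load
 *	...						// after load:
 *	int fd1 = bpf_program__nth_fd(prog, 1);
 */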
 9030
 9031enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog)
 9032{
 9033	return prog->type;
 9034}
 9035
 9036void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
 9037{
 9038	prog->type = type;
 9039}
 9040
 9041static bool bpf_program__is_type(const struct bpf_program *prog,
 9042				 enum bpf_prog_type type)
 9043{
 9044	return prog ? (prog->type == type) : false;
 9045}
 9046
 9047#define BPF_PROG_TYPE_FNS(NAME, TYPE)				\
 9048int bpf_program__set_##NAME(struct bpf_program *prog)		\
 9049{								\
 9050	if (!prog)						\
 9051		return libbpf_err(-EINVAL);			\
 9052	bpf_program__set_type(prog, TYPE);			\
 9053	return 0;						\
 9054}								\
 9055								\
 9056bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
 9057{								\
 9058	return bpf_program__is_type(prog, TYPE);		\
 9059}								\
 9060
 9061BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
 9062BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
 9063BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
 9064BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
 9065BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
 9066BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
 9067BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
 9068BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
 9069BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
 9070BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
 9071BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
 9072BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
 9073BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
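
/*
 * Editor's usage sketch (hypothetical): the helpers generated above, e.g.
 * for XDP.
 *
 *	if (!bpf_program__is_xdp(prog))
 *		bpf_program__set_xdp(prog);	// same as bpf_program__set_type(
 *						// prog, BPF_PROG_TYPE_XDP)
 */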
 9074
 9075enum bpf_attach_type
 9076bpf_program__get_expected_attach_type(const struct bpf_program *prog)
 9077{
 9078	return prog->expected_attach_type;
 9079}
 9080
 9081void bpf_program__set_expected_attach_type(struct bpf_program *prog,
 9082					   enum bpf_attach_type type)
 9083{
 9084	prog->expected_attach_type = type;
 9085}
 9086
 9087#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,	    \
 9088			  attachable, attach_btf)			    \
 9089	{								    \
 9090		.sec = string,						    \
 9091		.len = sizeof(string) - 1,				    \
 9092		.prog_type = ptype,					    \
 9093		.expected_attach_type = eatype,				    \
 9094		.is_exp_attach_type_optional = eatype_optional,		    \
 9095		.is_attachable = attachable,				    \
 9096		.is_attach_btf = attach_btf,				    \
 9097	}
 9098
 9099/* Programs that can NOT be attached. */
 9100#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
 9101
 9102/* Programs that can be attached. */
 9103#define BPF_APROG_SEC(string, ptype, atype) \
 9104	BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
 9105
 9106/* Programs that must specify expected attach type at load time. */
 9107#define BPF_EAPROG_SEC(string, ptype, eatype) \
 9108	BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
 9109
 9110/* Programs that use BTF to identify attach point */
 9111#define BPF_PROG_BTF(string, ptype, eatype) \
 9112	BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
 9113
 9114/* Programs that can be attached but attach type can't be identified by section
 9115 * name. Kept for backward compatibility.
 9116 */
 9117#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
 9118
 9119#define SEC_DEF(sec_pfx, ptype, ...) {					    \
 9120	.sec = sec_pfx,							    \
 9121	.len = sizeof(sec_pfx) - 1,					    \
 9122	.prog_type = BPF_PROG_TYPE_##ptype,				    \
 9123	__VA_ARGS__							    \
 9124}
 9125
 9126static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
 9127				      struct bpf_program *prog);
 9128static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
 9129				  struct bpf_program *prog);
 9130static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
 9131				      struct bpf_program *prog);
 9132static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
 9133				     struct bpf_program *prog);
 9134static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
 9135				   struct bpf_program *prog);
 9136static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
 9137				    struct bpf_program *prog);
 9138
 9139static const struct bpf_sec_def section_defs[] = {
 9140	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
 9141	BPF_EAPROG_SEC("sk_reuseport/migrate",	BPF_PROG_TYPE_SK_REUSEPORT,
 9142						BPF_SK_REUSEPORT_SELECT_OR_MIGRATE),
 9143	BPF_EAPROG_SEC("sk_reuseport",		BPF_PROG_TYPE_SK_REUSEPORT,
 9144						BPF_SK_REUSEPORT_SELECT),
 9145	SEC_DEF("kprobe/", KPROBE,
 9146		.attach_fn = attach_kprobe),
 9147	BPF_PROG_SEC("uprobe/",			BPF_PROG_TYPE_KPROBE),
 9148	SEC_DEF("kretprobe/", KPROBE,
 9149		.attach_fn = attach_kprobe),
 9150	BPF_PROG_SEC("uretprobe/",		BPF_PROG_TYPE_KPROBE),
 9151	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
 9152	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
 9153	SEC_DEF("tracepoint/", TRACEPOINT,
 9154		.attach_fn = attach_tp),
 9155	SEC_DEF("tp/", TRACEPOINT,
 9156		.attach_fn = attach_tp),
 9157	SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
 9158		.attach_fn = attach_raw_tp),
 9159	SEC_DEF("raw_tp/", RAW_TRACEPOINT,
 9160		.attach_fn = attach_raw_tp),
 9161	SEC_DEF("tp_btf/", TRACING,
 9162		.expected_attach_type = BPF_TRACE_RAW_TP,
 9163		.is_attach_btf = true,
 9164		.attach_fn = attach_trace),
 9165	SEC_DEF("fentry/", TRACING,
 9166		.expected_attach_type = BPF_TRACE_FENTRY,
 9167		.is_attach_btf = true,
 9168		.attach_fn = attach_trace),
 9169	SEC_DEF("fmod_ret/", TRACING,
 9170		.expected_attach_type = BPF_MODIFY_RETURN,
 9171		.is_attach_btf = true,
 9172		.attach_fn = attach_trace),
 9173	SEC_DEF("fexit/", TRACING,
 9174		.expected_attach_type = BPF_TRACE_FEXIT,
 9175		.is_attach_btf = true,
 9176		.attach_fn = attach_trace),
 9177	SEC_DEF("fentry.s/", TRACING,
 9178		.expected_attach_type = BPF_TRACE_FENTRY,
 9179		.is_attach_btf = true,
 9180		.is_sleepable = true,
 9181		.attach_fn = attach_trace),
 9182	SEC_DEF("fmod_ret.s/", TRACING,
 9183		.expected_attach_type = BPF_MODIFY_RETURN,
 9184		.is_attach_btf = true,
 9185		.is_sleepable = true,
 9186		.attach_fn = attach_trace),
 9187	SEC_DEF("fexit.s/", TRACING,
 9188		.expected_attach_type = BPF_TRACE_FEXIT,
 9189		.is_attach_btf = true,
 9190		.is_sleepable = true,
 9191		.attach_fn = attach_trace),
 9192	SEC_DEF("freplace/", EXT,
 9193		.is_attach_btf = true,
 9194		.attach_fn = attach_trace),
 9195	SEC_DEF("lsm/", LSM,
 9196		.is_attach_btf = true,
 9197		.expected_attach_type = BPF_LSM_MAC,
 9198		.attach_fn = attach_lsm),
 9199	SEC_DEF("lsm.s/", LSM,
 9200		.is_attach_btf = true,
 9201		.is_sleepable = true,
 9202		.expected_attach_type = BPF_LSM_MAC,
 9203		.attach_fn = attach_lsm),
 9204	SEC_DEF("iter/", TRACING,
 9205		.expected_attach_type = BPF_TRACE_ITER,
 9206		.is_attach_btf = true,
 9207		.attach_fn = attach_iter),
 9208	SEC_DEF("syscall", SYSCALL,
 9209		.is_sleepable = true),
 9210	BPF_EAPROG_SEC("xdp_devmap/",		BPF_PROG_TYPE_XDP,
 9211						BPF_XDP_DEVMAP),
 9212	BPF_EAPROG_SEC("xdp_cpumap/",		BPF_PROG_TYPE_XDP,
 9213						BPF_XDP_CPUMAP),
 9214	BPF_APROG_SEC("xdp",			BPF_PROG_TYPE_XDP,
 9215						BPF_XDP),
 9216	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
 9217	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
 9218	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
 9219	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
 9220	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
 9221	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
 9222						BPF_CGROUP_INET_INGRESS),
 9223	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
 9224						BPF_CGROUP_INET_EGRESS),
 9225	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
 9226	BPF_EAPROG_SEC("cgroup/sock_create",	BPF_PROG_TYPE_CGROUP_SOCK,
 9227						BPF_CGROUP_INET_SOCK_CREATE),
 9228	BPF_EAPROG_SEC("cgroup/sock_release",	BPF_PROG_TYPE_CGROUP_SOCK,
 9229						BPF_CGROUP_INET_SOCK_RELEASE),
 9230	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
 9231						BPF_CGROUP_INET_SOCK_CREATE),
 9232	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
 9233						BPF_CGROUP_INET4_POST_BIND),
 9234	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
 9235						BPF_CGROUP_INET6_POST_BIND),
 9236	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
 9237						BPF_CGROUP_DEVICE),
 9238	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
 9239						BPF_CGROUP_SOCK_OPS),
 9240	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
 9241						BPF_SK_SKB_STREAM_PARSER),
 9242	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
 9243						BPF_SK_SKB_STREAM_VERDICT),
 9244	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
 9245	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
 9246						BPF_SK_MSG_VERDICT),
 9247	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
 9248						BPF_LIRC_MODE2),
 9249	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
 9250						BPF_FLOW_DISSECTOR),
 9251	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9252						BPF_CGROUP_INET4_BIND),
 9253	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9254						BPF_CGROUP_INET6_BIND),
 9255	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9256						BPF_CGROUP_INET4_CONNECT),
 9257	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9258						BPF_CGROUP_INET6_CONNECT),
 9259	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9260						BPF_CGROUP_UDP4_SENDMSG),
 9261	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9262						BPF_CGROUP_UDP6_SENDMSG),
 9263	BPF_EAPROG_SEC("cgroup/recvmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9264						BPF_CGROUP_UDP4_RECVMSG),
 9265	BPF_EAPROG_SEC("cgroup/recvmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9266						BPF_CGROUP_UDP6_RECVMSG),
 9267	BPF_EAPROG_SEC("cgroup/getpeername4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9268						BPF_CGROUP_INET4_GETPEERNAME),
 9269	BPF_EAPROG_SEC("cgroup/getpeername6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9270						BPF_CGROUP_INET6_GETPEERNAME),
 9271	BPF_EAPROG_SEC("cgroup/getsockname4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9272						BPF_CGROUP_INET4_GETSOCKNAME),
 9273	BPF_EAPROG_SEC("cgroup/getsockname6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 9274						BPF_CGROUP_INET6_GETSOCKNAME),
 9275	BPF_EAPROG_SEC("cgroup/sysctl",		BPF_PROG_TYPE_CGROUP_SYSCTL,
 9276						BPF_CGROUP_SYSCTL),
 9277	BPF_EAPROG_SEC("cgroup/getsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
 9278						BPF_CGROUP_GETSOCKOPT),
 9279	BPF_EAPROG_SEC("cgroup/setsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
 9280						BPF_CGROUP_SETSOCKOPT),
 9281	BPF_PROG_SEC("struct_ops",		BPF_PROG_TYPE_STRUCT_OPS),
 9282	BPF_EAPROG_SEC("sk_lookup/",		BPF_PROG_TYPE_SK_LOOKUP,
 9283						BPF_SK_LOOKUP),
 9284};
 9285
 9286#undef BPF_PROG_SEC_IMPL
 9287#undef BPF_PROG_SEC
 9288#undef BPF_APROG_SEC
 9289#undef BPF_EAPROG_SEC
 9290#undef BPF_APROG_COMPAT
 9291#undef SEC_DEF
 9292
 9293#define MAX_TYPE_NAME_SIZE 32
 9294
 9295static const struct bpf_sec_def *find_sec_def(const char *sec_name)
 9296{
 9297	int i, n = ARRAY_SIZE(section_defs);
 9298
 9299	for (i = 0; i < n; i++) {
 9300		if (strncmp(sec_name,
 9301			    section_defs[i].sec, section_defs[i].len))
 9302			continue;
 9303		return &section_defs[i];
 9304	}
 9305	return NULL;
 9306}
 9307
 9308static char *libbpf_get_type_names(bool attach_type)
 9309{
 9310	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
 9311	char *buf;
 9312
 9313	buf = malloc(len);
 9314	if (!buf)
 9315		return NULL;
 9316
 9317	buf[0] = '\0';
 9318	/* Build a string buf with all available names */
 9319	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
 9320		if (attach_type && !section_defs[i].is_attachable)
 9321			continue;
 9322
 9323		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
 9324			free(buf);
 9325			return NULL;
 9326		}
 9327		strcat(buf, " ");
 9328		strcat(buf, section_defs[i].sec);
 9329	}
 9330
 9331	return buf;
 9332}
 9333
 9334int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
 9335			     enum bpf_attach_type *expected_attach_type)
 9336{
 9337	const struct bpf_sec_def *sec_def;
 9338	char *type_names;
 9339
 9340	if (!name)
 9341		return libbpf_err(-EINVAL);
 9342
 9343	sec_def = find_sec_def(name);
 9344	if (sec_def) {
 9345		*prog_type = sec_def->prog_type;
 9346		*expected_attach_type = sec_def->expected_attach_type;
 9347		return 0;
 9348	}
 9349
 9350	pr_debug("failed to guess program type from ELF section '%s'\n", name);
 9351	type_names = libbpf_get_type_names(false);
 9352	if (type_names != NULL) {
 9353		pr_debug("supported section(type) names are:%s\n", type_names);
 9354		free(type_names);
 9355	}
 9356
 9357	return libbpf_err(-ESRCH);
 9358}
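
/*
 * Editor's usage sketch (hypothetical): derive program and attach type
 * from an ELF section name the way bpf_object__open() does internally.
 *
 *	enum bpf_prog_type ptype;
 *	enum bpf_attach_type atype;
 *
 *	if (libbpf_prog_type_by_name("cgroup_skb/ingress", &ptype, &atype))
 *		return -1;
 *	// ptype == BPF_PROG_TYPE_CGROUP_SKB, atype == BPF_CGROUP_INET_INGRESS
 */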
 9359
 9360static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
 9361						     size_t offset)
 9362{
 9363	struct bpf_map *map;
 9364	size_t i;
 9365
 9366	for (i = 0; i < obj->nr_maps; i++) {
 9367		map = &obj->maps[i];
 9368		if (!bpf_map__is_struct_ops(map))
 9369			continue;
 9370		if (map->sec_offset <= offset &&
 9371		    offset - map->sec_offset < map->def.value_size)
 9372			return map;
 9373	}
 9374
 9375	return NULL;
 9376}
 9377
 9378/* Collect the reloc from ELF and populate the st_ops->progs[] */
 9379static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 9380					    GElf_Shdr *shdr, Elf_Data *data)
 9381{
 9382	const struct btf_member *member;
 9383	struct bpf_struct_ops *st_ops;
 9384	struct bpf_program *prog;
 9385	unsigned int shdr_idx;
 9386	const struct btf *btf;
 9387	struct bpf_map *map;
 9388	Elf_Data *symbols;
 9389	unsigned int moff, insn_idx;
 9390	const char *name;
 9391	__u32 member_idx;
 9392	GElf_Sym sym;
 9393	GElf_Rel rel;
 9394	int i, nrels;
 9395
 9396	symbols = obj->efile.symbols;
 9397	btf = obj->btf;
 9398	nrels = shdr->sh_size / shdr->sh_entsize;
 9399	for (i = 0; i < nrels; i++) {
 9400		if (!gelf_getrel(data, i, &rel)) {
 9401			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
 9402			return -LIBBPF_ERRNO__FORMAT;
 9403		}
 9404
 9405		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
 9406			pr_warn("struct_ops reloc: symbol %zx not found\n",
 9407				(size_t)GELF_R_SYM(rel.r_info));
 9408			return -LIBBPF_ERRNO__FORMAT;
 9409		}
 9410
 9411		name = elf_sym_str(obj, sym.st_name) ?: "<?>";
 9412		map = find_struct_ops_map_by_offset(obj, rel.r_offset);
 9413		if (!map) {
 9414			pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
 9415				(size_t)rel.r_offset);
 9416			return -EINVAL;
 9417		}
 9418
 9419		moff = rel.r_offset - map->sec_offset;
 9420		shdr_idx = sym.st_shndx;
 9421		st_ops = map->st_ops;
 9422		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d ('%s')\n",
 9423			 map->name,
 9424			 (long long)(rel.r_info >> 32),
 9425			 (long long)sym.st_value,
 9426			 shdr_idx, (size_t)rel.r_offset,
 9427			 map->sec_offset, sym.st_name, name);
 9428
 9429		if (shdr_idx >= SHN_LORESERVE) {
 9430			pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
 9431				map->name, (size_t)rel.r_offset, shdr_idx);
 9432			return -LIBBPF_ERRNO__RELOC;
 9433		}
 9434		if (sym.st_value % BPF_INSN_SZ) {
 9435			pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
 9436				map->name, (unsigned long long)sym.st_value);
 9437			return -LIBBPF_ERRNO__FORMAT;
 9438		}
 9439		insn_idx = sym.st_value / BPF_INSN_SZ;
 9440
 9441		member = find_member_by_offset(st_ops->type, moff * 8);
 9442		if (!member) {
 9443			pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
 9444				map->name, moff);
 9445			return -EINVAL;
 9446		}
 9447		member_idx = member - btf_members(st_ops->type);
 9448		name = btf__name_by_offset(btf, member->name_off);
 9449
 9450		if (!resolve_func_ptr(btf, member->type, NULL)) {
 9451			pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
 9452				map->name, name);
 9453			return -EINVAL;
 9454		}
 9455
 9456		prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
 9457		if (!prog) {
 9458			pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
 9459				map->name, shdr_idx, name);
 9460			return -EINVAL;
 9461		}
 9462
 9463		if (prog->type == BPF_PROG_TYPE_UNSPEC) {
 9464			const struct bpf_sec_def *sec_def;
 9465
 9466			sec_def = find_sec_def(prog->sec_name);
 9467			if (sec_def &&
 9468			    sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
 9469				/* for pr_warn */
 9470				prog->type = sec_def->prog_type;
 9471				goto invalid_prog;
 9472			}
 9473
 9474			prog->type = BPF_PROG_TYPE_STRUCT_OPS;
 9475			prog->attach_btf_id = st_ops->type_id;
 9476			prog->expected_attach_type = member_idx;
 9477		} else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
 9478			   prog->attach_btf_id != st_ops->type_id ||
 9479			   prog->expected_attach_type != member_idx) {
 9480			goto invalid_prog;
 9481		}
 9482		st_ops->progs[member_idx] = prog;
 9483	}
 9484
 9485	return 0;
 9486
 9487invalid_prog:
 9488	pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
 9489		map->name, prog->name, prog->sec_name, prog->type,
 9490		prog->attach_btf_id, prog->expected_attach_type, name);
 9491	return -EINVAL;
 9492}
 9493
 9494#define BTF_TRACE_PREFIX "btf_trace_"
 9495#define BTF_LSM_PREFIX "bpf_lsm_"
 9496#define BTF_ITER_PREFIX "bpf_iter_"
 9497#define BTF_MAX_NAME_SIZE 128
 9498
 9499void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
 9500				const char **prefix, int *kind)
 9501{
 9502	switch (attach_type) {
 9503	case BPF_TRACE_RAW_TP:
 9504		*prefix = BTF_TRACE_PREFIX;
 9505		*kind = BTF_KIND_TYPEDEF;
 9506		break;
 9507	case BPF_LSM_MAC:
 9508		*prefix = BTF_LSM_PREFIX;
 9509		*kind = BTF_KIND_FUNC;
 9510		break;
 9511	case BPF_TRACE_ITER:
 9512		*prefix = BTF_ITER_PREFIX;
 9513		*kind = BTF_KIND_FUNC;
 9514		break;
 9515	default:
 9516		*prefix = "";
 9517		*kind = BTF_KIND_FUNC;
 9518	}
 9519}
 9520
 9521static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
 9522				   const char *name, __u32 kind)
 9523{
 9524	char btf_type_name[BTF_MAX_NAME_SIZE];
 9525	int ret;
 9526
 9527	ret = snprintf(btf_type_name, sizeof(btf_type_name),
 9528		       "%s%s", prefix, name);
 9529	/* snprintf returns the number of characters written excluding the
 9530	 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
 9531	 * indicates truncation.
 9532	 */
 9533	if (ret < 0 || ret >= sizeof(btf_type_name))
 9534		return -ENAMETOOLONG;
 9535	return btf__find_by_name_kind(btf, btf_type_name, kind);
 9536}
 9537
 9538static inline int find_attach_btf_id(struct btf *btf, const char *name,
 9539				     enum bpf_attach_type attach_type)
 9540{
 9541	const char *prefix;
 9542	int kind;
 9543
 9544	btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
 9545	return find_btf_by_prefix_kind(btf, prefix, name, kind);
 9546}
 9547
 9548int libbpf_find_vmlinux_btf_id(const char *name,
 9549			       enum bpf_attach_type attach_type)
 9550{
 9551	struct btf *btf;
 9552	int err;
 9553
 9554	btf = libbpf_find_kernel_btf();
 9555	err = libbpf_get_error(btf);
 9556	if (err) {
 9557		pr_warn("vmlinux BTF is not found\n");
 9558		return libbpf_err(err);
 9559	}
 9560
 9561	err = find_attach_btf_id(btf, name, attach_type);
 9562	if (err <= 0)
 9563		pr_warn("%s is not found in vmlinux BTF\n", name);
 9564
 9565	btf__free(btf);
 9566	return libbpf_err(err);
 9567}
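
/*
 * Editor's usage sketch (hypothetical): resolve the vmlinux BTF ID that,
 * e.g., a fentry program for do_unlinkat() would attach to; the kernel
 * function name is an assumption for illustration.
 *
 *	int btf_id;
 *
 *	btf_id = libbpf_find_vmlinux_btf_id("do_unlinkat", BPF_TRACE_FENTRY);
 *	if (btf_id < 0)
 *		return btf_id;
 */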
 9568
 9569static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
 9570{
 9571	struct bpf_prog_info_linear *info_linear;
 9572	struct bpf_prog_info *info;
 9573	struct btf *btf = NULL;
 9574	int err;
 9575
 9576	info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
 9577	err = libbpf_get_error(info_linear);
 9578	if (err) {
 9579		pr_warn("failed to get prog_info_linear for FD %d\n",
 9580			attach_prog_fd);
 9581		return err;
 9582	}
 9583
 9584	err = -EINVAL;
 9585	info = &info_linear->info;
 9586	if (!info->btf_id) {
 9587		pr_warn("The target program doesn't have BTF\n");
 9588		goto out;
 9589	}
 9590	if (btf__get_from_id(info->btf_id, &btf)) {
 9591		pr_warn("Failed to get BTF of the program\n");
 9592		goto out;
 9593	}
 9594	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
 9595	btf__free(btf);
 9596	if (err <= 0) {
 9597		pr_warn("%s is not found in prog's BTF\n", name);
 9598		goto out;
 9599	}
 9600out:
 9601	free(info_linear);
 9602	return err;
 9603}
 9604
 9605static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
 9606			      enum bpf_attach_type attach_type,
 9607			      int *btf_obj_fd, int *btf_type_id)
 9608{
 9609	int ret, i;
 9610
 9611	ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
 9612	if (ret > 0) {
 9613		*btf_obj_fd = 0; /* vmlinux BTF */
 9614		*btf_type_id = ret;
 9615		return 0;
 9616	}
 9617	if (ret != -ENOENT)
 9618		return ret;
 9619
 9620	ret = load_module_btfs(obj);
 9621	if (ret)
 9622		return ret;
 9623
 9624	for (i = 0; i < obj->btf_module_cnt; i++) {
 9625		const struct module_btf *mod = &obj->btf_modules[i];
 9626
 9627		ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
 9628		if (ret > 0) {
 9629			*btf_obj_fd = mod->fd;
 9630			*btf_type_id = ret;
 9631			return 0;
 9632		}
 9633		if (ret == -ENOENT)
 9634			continue;
 9635
 9636		return ret;
 9637	}
 9638
 9639	return -ESRCH;
 9640}
 9641
 9642static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
 9643{
 9644	enum bpf_attach_type attach_type = prog->expected_attach_type;
 9645	__u32 attach_prog_fd = prog->attach_prog_fd;
 9646	const char *name = prog->sec_name, *attach_name;
 9647	const struct bpf_sec_def *sec = NULL;
 9648	int i, err = 0;
 9649
 9650	if (!name)
 9651		return -EINVAL;
 9652
 9653	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
 9654		if (!section_defs[i].is_attach_btf)
 9655			continue;
 9656		if (strncmp(name, section_defs[i].sec, section_defs[i].len))
 9657			continue;
 9658
 9659		sec = &section_defs[i];
 9660		break;
 9661	}
 9662
 9663	if (!sec) {
 9664		pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
 9665		return -ESRCH;
 9666	}
 9667	attach_name = name + sec->len;
 9668
 9669	/* BPF program's BTF ID */
 9670	if (attach_prog_fd) {
 9671		err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
 9672		if (err < 0) {
 9673			pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
 9674				 attach_prog_fd, attach_name, err);
 9675			return err;
 9676		}
 9677		*btf_obj_fd = 0;
 9678		*btf_type_id = err;
 9679		return 0;
 9680	}
 9681
 9682	/* kernel/module BTF ID */
 9683	if (prog->obj->gen_loader) {
 9684		bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
 9685		*btf_obj_fd = 0;
 9686		*btf_type_id = 1;
 9687	} else {
 9688		err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
 9689	}
 9690	if (err) {
 9691		pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
 9692		return err;
 9693	}
 9694	return 0;
 9695}
 9696
 9697int libbpf_attach_type_by_name(const char *name,
 9698			       enum bpf_attach_type *attach_type)
 9699{
 9700	char *type_names;
 9701	int i;
 9702
 9703	if (!name)
 9704		return libbpf_err(-EINVAL);
 9705
 9706	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
 9707		if (strncmp(name, section_defs[i].sec, section_defs[i].len))
 9708			continue;
 9709		if (!section_defs[i].is_attachable)
 9710			return libbpf_err(-EINVAL);
 9711		*attach_type = section_defs[i].expected_attach_type;
 9712		return 0;
 9713	}
 9714	pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
 9715	type_names = libbpf_get_type_names(true);
 9716	if (type_names != NULL) {
 9717		pr_debug("attachable section(type) names are:%s\n", type_names);
 9718		free(type_names);
 9719	}
 9720
 9721	return libbpf_err(-EINVAL);
 9722}
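
/*
 * Editor's usage sketch (hypothetical): map a section name to the attach
 * type expected by bpf_prog_attach() and friends.
 *
 *	enum bpf_attach_type atype;
 *
 *	if (libbpf_attach_type_by_name("cgroup/sock", &atype))
 *		return -1;
 *	// atype == BPF_CGROUP_INET_SOCK_CREATE
 */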
 9723
 9724int bpf_map__fd(const struct bpf_map *map)
 9725{
 9726	return map ? map->fd : libbpf_err(-EINVAL);
 9727}
 9728
 9729const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
 9730{
 9731	return map ? &map->def : libbpf_err_ptr(-EINVAL);
 9732}
 9733
 9734const char *bpf_map__name(const struct bpf_map *map)
 9735{
 9736	return map ? map->name : NULL;
 9737}
 9738
 9739enum bpf_map_type bpf_map__type(const struct bpf_map *map)
 9740{
 9741	return map->def.type;
 9742}
 9743
 9744int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
 9745{
 9746	if (map->fd >= 0)
 9747		return libbpf_err(-EBUSY);
 9748	map->def.type = type;
 9749	return 0;
 9750}
 9751
 9752__u32 bpf_map__map_flags(const struct bpf_map *map)
 9753{
 9754	return map->def.map_flags;
 9755}
 9756
 9757int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
 9758{
 9759	if (map->fd >= 0)
 9760		return libbpf_err(-EBUSY);
 9761	map->def.map_flags = flags;
 9762	return 0;
 9763}
 9764
 9765__u32 bpf_map__numa_node(const struct bpf_map *map)
 9766{
 9767	return map->numa_node;
 9768}
 9769
 9770int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
 9771{
 9772	if (map->fd >= 0)
 9773		return libbpf_err(-EBUSY);
 9774	map->numa_node = numa_node;
 9775	return 0;
 9776}
 9777
 9778__u32 bpf_map__key_size(const struct bpf_map *map)
 9779{
 9780	return map->def.key_size;
 9781}
 9782
 9783int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
 9784{
 9785	if (map->fd >= 0)
 9786		return libbpf_err(-EBUSY);
 9787	map->def.key_size = size;
 9788	return 0;
 9789}
 9790
 9791__u32 bpf_map__value_size(const struct bpf_map *map)
 9792{
 9793	return map->def.value_size;
 9794}
 9795
 9796int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
 9797{
 9798	if (map->fd >= 0)
 9799		return libbpf_err(-EBUSY);
 9800	map->def.value_size = size;
 9801	return 0;
 9802}
 9803
 9804__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
 9805{
 9806	return map ? map->btf_key_type_id : 0;
 9807}
 9808
 9809__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
 9810{
 9811	return map ? map->btf_value_type_id : 0;
 9812}
 9813
 9814int bpf_map__set_priv(struct bpf_map *map, void *priv,
 9815		     bpf_map_clear_priv_t clear_priv)
 9816{
 9817	if (!map)
 9818		return libbpf_err(-EINVAL);
 9819
 9820	if (map->priv) {
 9821		if (map->clear_priv)
 9822			map->clear_priv(map, map->priv);
 9823	}
 9824
 9825	map->priv = priv;
 9826	map->clear_priv = clear_priv;
 9827	return 0;
 9828}
 9829
 9830void *bpf_map__priv(const struct bpf_map *map)
 9831{
 9832	return map ? map->priv : libbpf_err_ptr(-EINVAL);
 9833}
 9834
 9835int bpf_map__set_initial_value(struct bpf_map *map,
 9836			       const void *data, size_t size)
 9837{
 9838	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
 9839	    size != map->def.value_size || map->fd >= 0)
 9840		return libbpf_err(-EINVAL);
 9841
 9842	memcpy(map->mmaped, data, size);
 9843	return 0;
 9844}
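
/*
 * Editor's usage sketch (hypothetical): seed an internal (e.g. .rodata)
 * map before load; the size must equal the map's value size, and the map
 * name "my_obj.rodata" is an assumption for illustration.
 *
 *	struct { __u32 debug; } init = { .debug = 1 };
 *	struct bpf_map *map;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_obj.rodata");
 *	if (!map || bpf_map__set_initial_value(map, &init, sizeof(init)))
 *		return -1;
 */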
 9845
 9846const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
 9847{
 9848	if (!map->mmaped)
 9849		return NULL;
 9850	*psize = map->def.value_size;
 9851	return map->mmaped;
 9852}
 9853
 9854bool bpf_map__is_offload_neutral(const struct bpf_map *map)
 9855{
 9856	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
 9857}
 9858
 9859bool bpf_map__is_internal(const struct bpf_map *map)
 9860{
 9861	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
 9862}
 9863
 9864__u32 bpf_map__ifindex(const struct bpf_map *map)
 9865{
 9866	return map->map_ifindex;
 9867}
 9868
 9869int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
 9870{
 9871	if (map->fd >= 0)
 9872		return libbpf_err(-EBUSY);
 9873	map->map_ifindex = ifindex;
 9874	return 0;
 9875}
 9876
 9877int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
 9878{
 9879	if (!bpf_map_type__is_map_in_map(map->def.type)) {
 9880		pr_warn("error: unsupported map type\n");
 9881		return libbpf_err(-EINVAL);
 9882	}
 9883	if (map->inner_map_fd != -1) {
 9884		pr_warn("error: inner_map_fd already specified\n");
 9885		return libbpf_err(-EINVAL);
 9886	}
 9887	zfree(&map->inner_map);
 9888	map->inner_map_fd = fd;
 9889	return 0;
 9890}
 9891
 9892static struct bpf_map *
 9893__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
 9894{
 9895	ssize_t idx;
 9896	struct bpf_map *s, *e;
 9897
 9898	if (!obj || !obj->maps)
 9899		return errno = EINVAL, NULL;
 9900
 9901	s = obj->maps;
 9902	e = obj->maps + obj->nr_maps;
 9903
 9904	if ((m < s) || (m >= e)) {
 9905		pr_warn("error in %s: map handle doesn't belong to object\n",
 9906			 __func__);
 9907		return errno = EINVAL, NULL;
 9908	}
 9909
 9910	idx = (m - obj->maps) + i;
 9911	if (idx >= obj->nr_maps || idx < 0)
 9912		return NULL;
 9913	return &obj->maps[idx];
 9914}
 9915
 9916struct bpf_map *
 9917bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
 9918{
 9919	if (prev == NULL)
 9920		return obj->maps;
 9921
 9922	return __bpf_map__iter(prev, obj, 1);
 9923}
 9924
 9925struct bpf_map *
 9926bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
 9927{
 9928	if (next == NULL) {
 9929		if (!obj->nr_maps)
 9930			return NULL;
 9931		return obj->maps + obj->nr_maps - 1;
 9932	}
 9933
 9934	return __bpf_map__iter(next, obj, -1);
 9935}
 9936
 9937struct bpf_map *
 9938bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
 9939{
 9940	struct bpf_map *pos;
 9941
 9942	bpf_object__for_each_map(pos, obj) {
 9943		if (pos->name && !strcmp(pos->name, name))
 9944			return pos;
 9945	}
 9946	return errno = ENOENT, NULL;
 9947}
 9948
 9949int
 9950bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
 9951{
 9952	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
 9953}
 9954
 9955struct bpf_map *
 9956bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
 9957{
 9958	return libbpf_err_ptr(-ENOTSUP);
 9959}
 9960
 9961long libbpf_get_error(const void *ptr)
 9962{
 9963	if (!IS_ERR_OR_NULL(ptr))
 9964		return 0;
 9965
 9966	if (IS_ERR(ptr))
 9967		errno = -PTR_ERR(ptr);
 9968
 9969	/* If ptr == NULL, then errno should be already set by the failing
 9970	 * API, because libbpf never returns NULL on success and it now always
 9971	 * sets errno on error. So no extra errno handling for ptr == NULL
 9972	 * case.
 9973	 */
 9974	return -errno;
 9975}
 9976
 9977int bpf_prog_load(const char *file, enum bpf_prog_type type,
 9978		  struct bpf_object **pobj, int *prog_fd)
 9979{
 9980	struct bpf_prog_load_attr attr;
 9981
 9982	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
 9983	attr.file = file;
 9984	attr.prog_type = type;
 9985	attr.expected_attach_type = 0;
 9986
 9987	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
 9988}
 9989
 9990int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
 9991			struct bpf_object **pobj, int *prog_fd)
 9992{
 9993	struct bpf_object_open_attr open_attr = {};
 9994	struct bpf_program *prog, *first_prog = NULL;
 9995	struct bpf_object *obj;
 9996	struct bpf_map *map;
 9997	int err;
 9998
 9999	if (!attr)
10000		return libbpf_err(-EINVAL);
10001	if (!attr->file)
10002		return libbpf_err(-EINVAL);
10003
10004	open_attr.file = attr->file;
10005	open_attr.prog_type = attr->prog_type;
10006
10007	obj = bpf_object__open_xattr(&open_attr);
10008	err = libbpf_get_error(obj);
10009	if (err)
10010		return libbpf_err(-ENOENT);
10011
10012	bpf_object__for_each_program(prog, obj) {
10013		enum bpf_attach_type attach_type = attr->expected_attach_type;
10014		/*
10015		 * to preserve backwards compatibility, bpf_prog_load treats
10016		 * attr->prog_type, if specified, as an override to whatever
10017		 * bpf_object__open guessed
10018		 */
10019		if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
10020			bpf_program__set_type(prog, attr->prog_type);
10021			bpf_program__set_expected_attach_type(prog,
10022							      attach_type);
10023		}
10024		if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
10025			/*
10026			 * we haven't guessed from section name and user
10027			 * didn't provide a fallback type, too bad...
10028			 */
10029			bpf_object__close(obj);
10030			return libbpf_err(-EINVAL);
10031		}
10032
10033		prog->prog_ifindex = attr->ifindex;
10034		prog->log_level = attr->log_level;
10035		prog->prog_flags |= attr->prog_flags;
10036		if (!first_prog)
10037			first_prog = prog;
10038	}
10039
10040	bpf_object__for_each_map(map, obj) {
10041		if (!bpf_map__is_offload_neutral(map))
10042			map->map_ifindex = attr->ifindex;
10043	}
10044
10045	if (!first_prog) {
10046		pr_warn("object file doesn't contain a BPF program\n");
10047		bpf_object__close(obj);
10048		return libbpf_err(-ENOENT);
10049	}
10050
10051	err = bpf_object__load(obj);
10052	if (err) {
10053		bpf_object__close(obj);
10054		return libbpf_err(err);
10055	}
10056
10057	*pobj = obj;
10058	*prog_fd = bpf_program__fd(first_prog);
10059	return 0;
10060}
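
/*
 * Editor's usage sketch (hypothetical): the convenience loader; returns
 * the opened object plus the FD of its first entry-point program. The
 * file name "prog.bpf.o" is an assumption for illustration.
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load("prog.bpf.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		return -1;
 */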
10061
10062struct bpf_link {
10063	int (*detach)(struct bpf_link *link);
10064	int (*destroy)(struct bpf_link *link);
10065	char *pin_path;		/* NULL, if not pinned */
10066	int fd;			/* hook FD, -1 if not applicable */
10067	bool disconnected;
10068};
10069
10070/* Replace link's underlying BPF program with the new one */
10071int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10072{
10073	int ret;
10074
10075	ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
10076	return libbpf_err_errno(ret);
10077}
10078
10079 /* Release "ownership" of the underlying BPF resource (typically, a BPF
10080 * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
10081 * disconnected link, when destructed through a bpf_link__destroy() call,
10082 * won't attempt to detach/unregister that BPF resource. This is useful
10083 * in situations where, say, an attached BPF program has to outlive the
10084 * userspace program that attached it. Depending on the type of BPF
10085 * program, though, additional steps (like pinning the BPF program in
10086 * BPF FS) may be necessary to ensure that exit of the userspace program
10087 * doesn't trigger automatic detachment and cleanup inside the kernel.
10088 */
10089void bpf_link__disconnect(struct bpf_link *link)
10090{
10091	link->disconnected = true;
10092}
10093
10094int bpf_link__destroy(struct bpf_link *link)
10095{
10096	int err = 0;
10097
10098	if (IS_ERR_OR_NULL(link))
10099		return 0;
10100
10101	if (!link->disconnected && link->detach)
10102		err = link->detach(link);
10103	if (link->destroy)
10104		link->destroy(link);
10105	if (link->pin_path)
10106		free(link->pin_path);
10107	free(link);
10108
10109	return libbpf_err(err);
10110}
10111
10112int bpf_link__fd(const struct bpf_link *link)
10113{
10114	return link->fd;
10115}
10116
10117const char *bpf_link__pin_path(const struct bpf_link *link)
10118{
10119	return link->pin_path;
10120}
10121
10122static int bpf_link__detach_fd(struct bpf_link *link)
10123{
10124	return libbpf_err_errno(close(link->fd));
10125}
10126
10127struct bpf_link *bpf_link__open(const char *path)
10128{
10129	struct bpf_link *link;
10130	int fd;
10131
10132	fd = bpf_obj_get(path);
10133	if (fd < 0) {
10134		fd = -errno;
10135		pr_warn("failed to open link at %s: %d\n", path, fd);
10136		return libbpf_err_ptr(fd);
10137	}
10138
10139	link = calloc(1, sizeof(*link));
10140	if (!link) {
10141		close(fd);
10142		return libbpf_err_ptr(-ENOMEM);
10143	}
10144	link->detach = &bpf_link__detach_fd;
10145	link->fd = fd;
10146
10147	link->pin_path = strdup(path);
10148	if (!link->pin_path) {
10149		bpf_link__destroy(link);
10150		return libbpf_err_ptr(-ENOMEM);
10151	}
10152
10153	return link;
10154}
10155
10156int bpf_link__detach(struct bpf_link *link)
10157{
10158	return bpf_link_detach(link->fd) ? -errno : 0;
10159}
10160
10161int bpf_link__pin(struct bpf_link *link, const char *path)
10162{
10163	int err;
10164
10165	if (link->pin_path)
10166		return libbpf_err(-EBUSY);
10167	err = make_parent_dir(path);
10168	if (err)
10169		return libbpf_err(err);
10170	err = check_path(path);
10171	if (err)
10172		return libbpf_err(err);
10173
10174	link->pin_path = strdup(path);
10175	if (!link->pin_path)
10176		return libbpf_err(-ENOMEM);
10177
10178	if (bpf_obj_pin(link->fd, link->pin_path)) {
10179		err = -errno;
10180		zfree(&link->pin_path);
10181		return libbpf_err(err);
10182	}
10183
10184	pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10185	return 0;
10186}
10187
10188int bpf_link__unpin(struct bpf_link *link)
10189{
10190	int err;
10191
10192	if (!link->pin_path)
10193		return libbpf_err(-EINVAL);
10194
10195	err = unlink(link->pin_path);
10196	if (err != 0)
10197		return -errno;
10198
10199	pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10200	zfree(&link->pin_path);
10201	return 0;
10202}
10203
10204static int bpf_link__detach_perf_event(struct bpf_link *link)
10205{
10206	int err;
10207
10208	err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
10209	if (err)
10210		err = -errno;
10211
10212	close(link->fd);
10213	return libbpf_err(err);
10214}
10215
10216struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
10217{
10218	char errmsg[STRERR_BUFSIZE];
10219	struct bpf_link *link;
10220	int prog_fd, err;
10221
10222	if (pfd < 0) {
10223		pr_warn("prog '%s': invalid perf event FD %d\n",
10224			prog->name, pfd);
10225		return libbpf_err_ptr(-EINVAL);
10226	}
10227	prog_fd = bpf_program__fd(prog);
10228	if (prog_fd < 0) {
10229		pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
10230			prog->name);
10231		return libbpf_err_ptr(-EINVAL);
10232	}
10233
10234	link = calloc(1, sizeof(*link));
10235	if (!link)
10236		return libbpf_err_ptr(-ENOMEM);
10237	link->detach = &bpf_link__detach_perf_event;
10238	link->fd = pfd;
10239
10240	if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10241		err = -errno;
10242		free(link);
10243		pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
10244			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10245		if (err == -EPROTO)
10246			pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10247				prog->name, pfd);
10248		return libbpf_err_ptr(err);
10249	}
10250	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10251		err = -errno;
10252		free(link);
10253		pr_warn("prog '%s': failed to enable pfd %d: %s\n",
10254			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10255		return libbpf_err_ptr(err);
10256	}
10257	return link;
10258}
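
/*
 * Usage sketch (the event attributes are an assumption; "prog" is a
 * loaded BPF program): open a software CPU-clock event sampling at 99 Hz
 * and hook the program into it.
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *		.size = sizeof(attr),
 *		.sample_freq = 99,
 *		.freq = 1,
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1,
 *			  PERF_FLAG_FD_CLOEXEC);
 *	struct bpf_link *link = bpf_program__attach_perf_event(prog, pfd);
 */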
10259
/*
 * This function is expected to parse an integer in the range of
 * [0, 2^31-1] from the given file using scanf format string fmt.  If the
 * actual parsed value is negative, the result might be indistinguishable
 * from an error code.
 */
10265static int parse_uint_from_file(const char *file, const char *fmt)
10266{
10267	char buf[STRERR_BUFSIZE];
10268	int err, ret;
10269	FILE *f;
10270
10271	f = fopen(file, "r");
10272	if (!f) {
10273		err = -errno;
10274		pr_debug("failed to open '%s': %s\n", file,
10275			 libbpf_strerror_r(err, buf, sizeof(buf)));
10276		return err;
10277	}
10278	err = fscanf(f, fmt, &ret);
10279	if (err != 1) {
10280		err = err == EOF ? -EIO : -errno;
10281		pr_debug("failed to parse '%s': %s\n", file,
10282			libbpf_strerror_r(err, buf, sizeof(buf)));
10283		fclose(f);
10284		return err;
10285	}
10286	fclose(f);
10287	return ret;
10288}
10289
10290static int determine_kprobe_perf_type(void)
10291{
10292	const char *file = "/sys/bus/event_source/devices/kprobe/type";
10293
10294	return parse_uint_from_file(file, "%d\n");
10295}
10296
10297static int determine_uprobe_perf_type(void)
10298{
10299	const char *file = "/sys/bus/event_source/devices/uprobe/type";
10300
10301	return parse_uint_from_file(file, "%d\n");
10302}
10303
10304static int determine_kprobe_retprobe_bit(void)
10305{
10306	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10307
10308	return parse_uint_from_file(file, "config:%d\n");
10309}
10310
10311static int determine_uprobe_retprobe_bit(void)
10312{
10313	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10314
10315	return parse_uint_from_file(file, "config:%d\n");
10316}
10317
10318static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
10319				 uint64_t offset, int pid)
10320{
10321	struct perf_event_attr attr = {};
10322	char errmsg[STRERR_BUFSIZE];
10323	int type, pfd, err;
10324
10325	type = uprobe ? determine_uprobe_perf_type()
10326		      : determine_kprobe_perf_type();
10327	if (type < 0) {
10328		pr_warn("failed to determine %s perf type: %s\n",
10329			uprobe ? "uprobe" : "kprobe",
10330			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10331		return type;
10332	}
10333	if (retprobe) {
10334		int bit = uprobe ? determine_uprobe_retprobe_bit()
10335				 : determine_kprobe_retprobe_bit();
10336
10337		if (bit < 0) {
10338			pr_warn("failed to determine %s retprobe bit: %s\n",
10339				uprobe ? "uprobe" : "kprobe",
10340				libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
10341			return bit;
10342		}
10343		attr.config |= 1 << bit;
10344	}
10345	attr.size = sizeof(attr);
10346	attr.type = type;
10347	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10348	attr.config2 = offset;		 /* kprobe_addr or probe_offset */
10349
10350	/* pid filter is meaningful only for uprobes */
10351	pfd = syscall(__NR_perf_event_open, &attr,
10352		      pid < 0 ? -1 : pid /* pid */,
10353		      pid == -1 ? 0 : -1 /* cpu */,
10354		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10355	if (pfd < 0) {
10356		err = -errno;
10357		pr_warn("%s perf_event_open() failed: %s\n",
10358			uprobe ? "uprobe" : "kprobe",
10359			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10360		return err;
10361	}
10362	return pfd;
10363}
10364
10365struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
10366					    bool retprobe,
10367					    const char *func_name)
10368{
10369	char errmsg[STRERR_BUFSIZE];
10370	struct bpf_link *link;
10371	int pfd, err;
10372
10373	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
10374				    0 /* offset */, -1 /* pid */);
10375	if (pfd < 0) {
10376		pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
10377			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
10378			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10379		return libbpf_err_ptr(pfd);
10380	}
10381	link = bpf_program__attach_perf_event(prog, pfd);
10382	err = libbpf_get_error(link);
10383	if (err) {
10384		close(pfd);
10385		pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
10386			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
10387			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10388		return libbpf_err_ptr(err);
10389	}
10390	return link;
10391}
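
/*
 * Usage sketch (the kernel function name is just an example):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_unlinkat");
 *	if (libbpf_get_error(link))
 *		... handle error ...
 */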
10392
10393static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
10394				      struct bpf_program *prog)
10395{
10396	const char *func_name;
10397	bool retprobe;
10398
10399	func_name = prog->sec_name + sec->len;
10400	retprobe = strcmp(sec->sec, "kretprobe/") == 0;
10401
10402	return bpf_program__attach_kprobe(prog, retprobe, func_name);
10403}
10404
10405struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
10406					    bool retprobe, pid_t pid,
10407					    const char *binary_path,
10408					    size_t func_offset)
10409{
10410	char errmsg[STRERR_BUFSIZE];
10411	struct bpf_link *link;
10412	int pfd, err;
10413
10414	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
10415				    binary_path, func_offset, pid);
10416	if (pfd < 0) {
10417		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
10418			prog->name, retprobe ? "uretprobe" : "uprobe",
10419			binary_path, func_offset,
10420			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10421		return libbpf_err_ptr(pfd);
10422	}
10423	link = bpf_program__attach_perf_event(prog, pfd);
10424	err = libbpf_get_error(link);
10425	if (err) {
10426		close(pfd);
10427		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
10428			prog->name, retprobe ? "uretprobe" : "uprobe",
10429			binary_path, func_offset,
10430			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10431		return libbpf_err_ptr(err);
10432	}
10433	return link;
10434}
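
/*
 * Usage sketch (binary path and offset are illustrative): trace entry
 * (retprobe == false) of the function at offset 0x1234 inside /bin/bash,
 * in all processes (pid == -1).
 *
 *	link = bpf_program__attach_uprobe(prog, false, -1, "/bin/bash",
 *					  0x1234);
 */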
10435
10436static int determine_tracepoint_id(const char *tp_category,
10437				   const char *tp_name)
10438{
10439	char file[PATH_MAX];
10440	int ret;
10441
10442	ret = snprintf(file, sizeof(file),
10443		       "/sys/kernel/debug/tracing/events/%s/%s/id",
10444		       tp_category, tp_name);
10445	if (ret < 0)
10446		return -errno;
10447	if (ret >= sizeof(file)) {
10448		pr_debug("tracepoint %s/%s path is too long\n",
10449			 tp_category, tp_name);
10450		return -E2BIG;
10451	}
10452	return parse_uint_from_file(file, "%d\n");
10453}
10454
10455static int perf_event_open_tracepoint(const char *tp_category,
10456				      const char *tp_name)
10457{
10458	struct perf_event_attr attr = {};
10459	char errmsg[STRERR_BUFSIZE];
10460	int tp_id, pfd, err;
10461
10462	tp_id = determine_tracepoint_id(tp_category, tp_name);
10463	if (tp_id < 0) {
10464		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
10465			tp_category, tp_name,
10466			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
10467		return tp_id;
10468	}
10469
10470	attr.type = PERF_TYPE_TRACEPOINT;
10471	attr.size = sizeof(attr);
10472	attr.config = tp_id;
10473
10474	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
10475		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10476	if (pfd < 0) {
10477		err = -errno;
10478		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
10479			tp_category, tp_name,
10480			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10481		return err;
10482	}
10483	return pfd;
10484}
10485
10486struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
10487						const char *tp_category,
10488						const char *tp_name)
10489{
10490	char errmsg[STRERR_BUFSIZE];
10491	struct bpf_link *link;
10492	int pfd, err;
10493
10494	pfd = perf_event_open_tracepoint(tp_category, tp_name);
10495	if (pfd < 0) {
10496		pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
10497			prog->name, tp_category, tp_name,
10498			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10499		return libbpf_err_ptr(pfd);
10500	}
10501	link = bpf_program__attach_perf_event(prog, pfd);
10502	err = libbpf_get_error(link);
10503	if (err) {
10504		close(pfd);
10505		pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
10506			prog->name, tp_category, tp_name,
10507			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10508		return libbpf_err_ptr(err);
10509	}
10510	return link;
10511}
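
/*
 * Usage sketch: attach to the sched:sched_switch tracepoint, whose perf
 * event ID is resolved from tracefs by the helpers above.
 *
 *	link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
 */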
10512
10513static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
10514				  struct bpf_program *prog)
10515{
10516	char *sec_name, *tp_cat, *tp_name;
10517	struct bpf_link *link;
10518
10519	sec_name = strdup(prog->sec_name);
10520	if (!sec_name)
10521		return libbpf_err_ptr(-ENOMEM);
10522
10523	/* extract "tp/<category>/<name>" */
10524	tp_cat = sec_name + sec->len;
10525	tp_name = strchr(tp_cat, '/');
10526	if (!tp_name) {
10527		free(sec_name);
10528		return libbpf_err_ptr(-EINVAL);
10529	}
10530	*tp_name = '\0';
10531	tp_name++;
10532
10533	link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
10534	free(sec_name);
10535	return link;
10536}
10537
10538struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
10539						    const char *tp_name)
10540{
10541	char errmsg[STRERR_BUFSIZE];
10542	struct bpf_link *link;
10543	int prog_fd, pfd;
10544
10545	prog_fd = bpf_program__fd(prog);
10546	if (prog_fd < 0) {
10547		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10548		return libbpf_err_ptr(-EINVAL);
10549	}
10550
10551	link = calloc(1, sizeof(*link));
10552	if (!link)
10553		return libbpf_err_ptr(-ENOMEM);
10554	link->detach = &bpf_link__detach_fd;
10555
10556	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
10557	if (pfd < 0) {
10558		pfd = -errno;
10559		free(link);
10560		pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
10561			prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10562		return libbpf_err_ptr(pfd);
10563	}
10564	link->fd = pfd;
10565	return link;
10566}
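
/*
 * Usage sketch: raw tracepoints take only an event name, no category,
 * and pass raw tracepoint arguments to the program.
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 */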
10567
10568static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
10569				      struct bpf_program *prog)
10570{
10571	const char *tp_name = prog->sec_name + sec->len;
10572
10573	return bpf_program__attach_raw_tracepoint(prog, tp_name);
10574}
10575
10576/* Common logic for all BPF program types that attach to a btf_id */
10577static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
10578{
10579	char errmsg[STRERR_BUFSIZE];
10580	struct bpf_link *link;
10581	int prog_fd, pfd;
10582
10583	prog_fd = bpf_program__fd(prog);
10584	if (prog_fd < 0) {
10585		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10586		return libbpf_err_ptr(-EINVAL);
10587	}
10588
10589	link = calloc(1, sizeof(*link));
10590	if (!link)
10591		return libbpf_err_ptr(-ENOMEM);
10592	link->detach = &bpf_link__detach_fd;
10593
10594	pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
10595	if (pfd < 0) {
10596		pfd = -errno;
10597		free(link);
10598		pr_warn("prog '%s': failed to attach: %s\n",
10599			prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10600		return libbpf_err_ptr(pfd);
10601	}
10602	link->fd = pfd;
	return link;
10604}
10605
10606struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
10607{
10608	return bpf_program__attach_btf_id(prog);
10609}
10610
10611struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
10612{
10613	return bpf_program__attach_btf_id(prog);
10614}
10615
10616static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
10617				     struct bpf_program *prog)
10618{
10619	return bpf_program__attach_trace(prog);
10620}
10621
10622static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
10623				   struct bpf_program *prog)
10624{
10625	return bpf_program__attach_lsm(prog);
10626}
10627
10628static struct bpf_link *
10629bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
10630		       const char *target_name)
10631{
10632	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
10633			    .target_btf_id = btf_id);
10634	enum bpf_attach_type attach_type;
10635	char errmsg[STRERR_BUFSIZE];
10636	struct bpf_link *link;
10637	int prog_fd, link_fd;
10638
10639	prog_fd = bpf_program__fd(prog);
10640	if (prog_fd < 0) {
10641		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10642		return libbpf_err_ptr(-EINVAL);
10643	}
10644
10645	link = calloc(1, sizeof(*link));
10646	if (!link)
10647		return libbpf_err_ptr(-ENOMEM);
10648	link->detach = &bpf_link__detach_fd;
10649
10650	attach_type = bpf_program__get_expected_attach_type(prog);
10651	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
10652	if (link_fd < 0) {
10653		link_fd = -errno;
10654		free(link);
10655		pr_warn("prog '%s': failed to attach to %s: %s\n",
10656			prog->name, target_name,
10657			libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10658		return libbpf_err_ptr(link_fd);
10659	}
10660	link->fd = link_fd;
10661	return link;
10662}
10663
10664struct bpf_link *
10665bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
10666{
10667	return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
10668}
10669
10670struct bpf_link *
10671bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
10672{
10673	return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
10674}
10675
10676struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
10677{
10678	/* target_fd/target_ifindex use the same field in LINK_CREATE */
10679	return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
10680}
10681
10682struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
10683					      int target_fd,
10684					      const char *attach_func_name)
10685{
10686	int btf_id;
10687
10688	if (!!target_fd != !!attach_func_name) {
10689		pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10690			prog->name);
10691		return libbpf_err_ptr(-EINVAL);
10692	}
10693
10694	if (prog->type != BPF_PROG_TYPE_EXT) {
		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
10696			prog->name);
10697		return libbpf_err_ptr(-EINVAL);
10698	}
10699
10700	if (target_fd) {
10701		btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10702		if (btf_id < 0)
10703			return libbpf_err_ptr(btf_id);
10704
10705		return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10706	} else {
10707		/* no target, so use raw_tracepoint_open for compatibility
10708		 * with old kernels
10709		 */
10710		return bpf_program__attach_trace(prog);
10711	}
10712}
10713
10714struct bpf_link *
10715bpf_program__attach_iter(struct bpf_program *prog,
10716			 const struct bpf_iter_attach_opts *opts)
10717{
10718	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10719	char errmsg[STRERR_BUFSIZE];
10720	struct bpf_link *link;
10721	int prog_fd, link_fd;
10722	__u32 target_fd = 0;
10723
10724	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10725		return libbpf_err_ptr(-EINVAL);
10726
10727	link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10728	link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10729
10730	prog_fd = bpf_program__fd(prog);
10731	if (prog_fd < 0) {
10732		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10733		return libbpf_err_ptr(-EINVAL);
10734	}
10735
10736	link = calloc(1, sizeof(*link));
10737	if (!link)
10738		return libbpf_err_ptr(-ENOMEM);
10739	link->detach = &bpf_link__detach_fd;
10740
10741	link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10742				  &link_create_opts);
10743	if (link_fd < 0) {
10744		link_fd = -errno;
10745		free(link);
10746		pr_warn("prog '%s': failed to attach to iterator: %s\n",
10747			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10748		return libbpf_err_ptr(link_fd);
10749	}
10750	link->fd = link_fd;
10751	return link;
10752}
10753
10754static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
10755				    struct bpf_program *prog)
10756{
10757	return bpf_program__attach_iter(prog, NULL);
10758}
10759
10760struct bpf_link *bpf_program__attach(struct bpf_program *prog)
10761{
10762	const struct bpf_sec_def *sec_def;
10763
10764	sec_def = find_sec_def(prog->sec_name);
10765	if (!sec_def || !sec_def->attach_fn)
10766		return libbpf_err_ptr(-ESRCH);
10767
10768	return sec_def->attach_fn(sec_def, prog);
10769}
10770
10771static int bpf_link__detach_struct_ops(struct bpf_link *link)
10772{
10773	__u32 zero = 0;
10774
10775	if (bpf_map_delete_elem(link->fd, &zero))
10776		return -errno;
10777
10778	return 0;
10779}
10780
10781struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
10782{
10783	struct bpf_struct_ops *st_ops;
10784	struct bpf_link *link;
10785	__u32 i, zero = 0;
10786	int err;
10787
10788	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10789		return libbpf_err_ptr(-EINVAL);
10790
10791	link = calloc(1, sizeof(*link));
10792	if (!link)
		return libbpf_err_ptr(-ENOMEM);
10794
10795	st_ops = map->st_ops;
10796	for (i = 0; i < btf_vlen(st_ops->type); i++) {
10797		struct bpf_program *prog = st_ops->progs[i];
10798		void *kern_data;
10799		int prog_fd;
10800
10801		if (!prog)
10802			continue;
10803
10804		prog_fd = bpf_program__fd(prog);
10805		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10806		*(unsigned long *)kern_data = prog_fd;
10807	}
10808
10809	err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10810	if (err) {
10811		err = -errno;
10812		free(link);
10813		return libbpf_err_ptr(err);
10814	}
10815
10816	link->detach = bpf_link__detach_struct_ops;
10817	link->fd = map->fd;
10818
10819	return link;
10820}
10821
10822enum bpf_perf_event_ret
10823bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10824			   void **copy_mem, size_t *copy_size,
10825			   bpf_perf_event_print_t fn, void *private_data)
10826{
10827	struct perf_event_mmap_page *header = mmap_mem;
10828	__u64 data_head = ring_buffer_read_head(header);
10829	__u64 data_tail = header->data_tail;
10830	void *base = ((__u8 *)header) + page_size;
10831	int ret = LIBBPF_PERF_EVENT_CONT;
10832	struct perf_event_header *ehdr;
10833	size_t ehdr_size;
10834
10835	while (data_head != data_tail) {
10836		ehdr = base + (data_tail & (mmap_size - 1));
10837		ehdr_size = ehdr->size;
10838
10839		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
10840			void *copy_start = ehdr;
10841			size_t len_first = base + mmap_size - copy_start;
10842			size_t len_secnd = ehdr_size - len_first;
10843
10844			if (*copy_size < ehdr_size) {
10845				free(*copy_mem);
10846				*copy_mem = malloc(ehdr_size);
10847				if (!*copy_mem) {
10848					*copy_size = 0;
10849					ret = LIBBPF_PERF_EVENT_ERROR;
10850					break;
10851				}
10852				*copy_size = ehdr_size;
10853			}
10854
10855			memcpy(*copy_mem, copy_start, len_first);
10856			memcpy(*copy_mem + len_first, base, len_secnd);
10857			ehdr = *copy_mem;
10858		}
10859
10860		ret = fn(ehdr, private_data);
10861		data_tail += ehdr_size;
10862		if (ret != LIBBPF_PERF_EVENT_CONT)
10863			break;
10864	}
10865
10866	ring_buffer_write_tail(header, data_tail);
10867	return libbpf_err(ret);
10868}
10869
10870struct perf_buffer;
10871
10872struct perf_buffer_params {
10873	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
10875	perf_buffer_event_fn event_cb;
10876	/* sample_cb and lost_cb are higher-level common-case callbacks */
10877	perf_buffer_sample_fn sample_cb;
10878	perf_buffer_lost_fn lost_cb;
10879	void *ctx;
10880	int cpu_cnt;
10881	int *cpus;
10882	int *map_keys;
10883};
10884
10885struct perf_cpu_buf {
10886	struct perf_buffer *pb;
10887	void *base; /* mmap()'ed memory */
10888	void *buf; /* for reconstructing segmented data */
10889	size_t buf_size;
10890	int fd;
10891	int cpu;
10892	int map_key;
10893};
10894
10895struct perf_buffer {
10896	perf_buffer_event_fn event_cb;
10897	perf_buffer_sample_fn sample_cb;
10898	perf_buffer_lost_fn lost_cb;
10899	void *ctx; /* passed into callbacks */
10900
10901	size_t page_size;
10902	size_t mmap_size;
10903	struct perf_cpu_buf **cpu_bufs;
10904	struct epoll_event *events;
10905	int cpu_cnt; /* number of allocated CPU buffers */
	int epoll_fd; /* epoll instance FD */
10907	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
10908};
10909
10910static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10911				      struct perf_cpu_buf *cpu_buf)
10912{
10913	if (!cpu_buf)
10914		return;
10915	if (cpu_buf->base &&
10916	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
10917		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
10918	if (cpu_buf->fd >= 0) {
10919		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10920		close(cpu_buf->fd);
10921	}
10922	free(cpu_buf->buf);
10923	free(cpu_buf);
10924}
10925
10926void perf_buffer__free(struct perf_buffer *pb)
10927{
10928	int i;
10929
10930	if (IS_ERR_OR_NULL(pb))
10931		return;
10932	if (pb->cpu_bufs) {
10933		for (i = 0; i < pb->cpu_cnt; i++) {
10934			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10935
10936			if (!cpu_buf)
10937				continue;
10938
10939			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10940			perf_buffer__free_cpu_buf(pb, cpu_buf);
10941		}
10942		free(pb->cpu_bufs);
10943	}
10944	if (pb->epoll_fd >= 0)
10945		close(pb->epoll_fd);
10946	free(pb->events);
10947	free(pb);
10948}
10949
10950static struct perf_cpu_buf *
10951perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10952			  int cpu, int map_key)
10953{
10954	struct perf_cpu_buf *cpu_buf;
10955	char msg[STRERR_BUFSIZE];
10956	int err;
10957
10958	cpu_buf = calloc(1, sizeof(*cpu_buf));
10959	if (!cpu_buf)
10960		return ERR_PTR(-ENOMEM);
10961
10962	cpu_buf->pb = pb;
10963	cpu_buf->cpu = cpu;
10964	cpu_buf->map_key = map_key;
10965
10966	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
10967			      -1, PERF_FLAG_FD_CLOEXEC);
10968	if (cpu_buf->fd < 0) {
10969		err = -errno;
10970		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10971			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10972		goto error;
10973	}
10974
10975	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10976			     PROT_READ | PROT_WRITE, MAP_SHARED,
10977			     cpu_buf->fd, 0);
10978	if (cpu_buf->base == MAP_FAILED) {
10979		cpu_buf->base = NULL;
10980		err = -errno;
10981		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10982			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10983		goto error;
10984	}
10985
10986	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10987		err = -errno;
10988		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10989			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10990		goto error;
10991	}
10992
10993	return cpu_buf;
10994
10995error:
10996	perf_buffer__free_cpu_buf(pb, cpu_buf);
10997	return (struct perf_cpu_buf *)ERR_PTR(err);
10998}
10999
11000static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
11001					      struct perf_buffer_params *p);
11002
11003struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
11004				     const struct perf_buffer_opts *opts)
11005{
11006	struct perf_buffer_params p = {};
11007	struct perf_event_attr attr = { 0, };
11008
11009	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
11010	attr.type = PERF_TYPE_SOFTWARE;
11011	attr.sample_type = PERF_SAMPLE_RAW;
11012	attr.sample_period = 1;
11013	attr.wakeup_events = 1;
11014
11015	p.attr = &attr;
11016	p.sample_cb = opts ? opts->sample_cb : NULL;
11017	p.lost_cb = opts ? opts->lost_cb : NULL;
11018	p.ctx = opts ? opts->ctx : NULL;
11019
11020	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
11021}
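
/*
 * Usage sketch (the callback body and map_fd are hypothetical): create a
 * perf_buffer over a BPF_MAP_TYPE_PERF_EVENT_ARRAY map with 8 pages of
 * ring buffer per CPU and consume records through a sample callback.
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		... one record emitted via bpf_perf_event_output() ...
 *	}
 *
 *	struct perf_buffer_opts pb_opts = { .sample_cb = on_sample };
 *	struct perf_buffer *pb = perf_buffer__new(map_fd, 8, &pb_opts);
 *
 *	while (perf_buffer__poll(pb, 100) >= 0)
 *		;	(callbacks fire from inside poll)
 */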
11022
11023struct perf_buffer *
11024perf_buffer__new_raw(int map_fd, size_t page_cnt,
11025		     const struct perf_buffer_raw_opts *opts)
11026{
11027	struct perf_buffer_params p = {};
11028
11029	p.attr = opts->attr;
11030	p.event_cb = opts->event_cb;
11031	p.ctx = opts->ctx;
11032	p.cpu_cnt = opts->cpu_cnt;
11033	p.cpus = opts->cpus;
11034	p.map_keys = opts->map_keys;
11035
11036	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
11037}
11038
11039static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
11040					      struct perf_buffer_params *p)
11041{
11042	const char *online_cpus_file = "/sys/devices/system/cpu/online";
11043	struct bpf_map_info map;
11044	char msg[STRERR_BUFSIZE];
11045	struct perf_buffer *pb;
11046	bool *online = NULL;
11047	__u32 map_info_len;
11048	int err, i, j, n;
11049
11050	if (page_cnt & (page_cnt - 1)) {
11051		pr_warn("page count should be power of two, but is %zu\n",
11052			page_cnt);
11053		return ERR_PTR(-EINVAL);
11054	}
11055
11056	/* best-effort sanity checks */
11057	memset(&map, 0, sizeof(map));
11058	map_info_len = sizeof(map);
11059	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
11060	if (err) {
11061		err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
		 * -EBADFD, -EFAULT, or -E2BIG on a real error
		 */
11065		if (err != -EINVAL) {
11066			pr_warn("failed to get map info for map FD %d: %s\n",
11067				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
11068			return ERR_PTR(err);
11069		}
11070		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
11071			 map_fd);
11072	} else {
11073		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
11074			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
11075				map.name);
11076			return ERR_PTR(-EINVAL);
11077		}
11078	}
11079
11080	pb = calloc(1, sizeof(*pb));
11081	if (!pb)
11082		return ERR_PTR(-ENOMEM);
11083
11084	pb->event_cb = p->event_cb;
11085	pb->sample_cb = p->sample_cb;
11086	pb->lost_cb = p->lost_cb;
11087	pb->ctx = p->ctx;
11088
11089	pb->page_size = getpagesize();
11090	pb->mmap_size = pb->page_size * page_cnt;
11091	pb->map_fd = map_fd;
11092
11093	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
11094	if (pb->epoll_fd < 0) {
11095		err = -errno;
11096		pr_warn("failed to create epoll instance: %s\n",
11097			libbpf_strerror_r(err, msg, sizeof(msg)));
11098		goto error;
11099	}
11100
11101	if (p->cpu_cnt > 0) {
11102		pb->cpu_cnt = p->cpu_cnt;
11103	} else {
11104		pb->cpu_cnt = libbpf_num_possible_cpus();
11105		if (pb->cpu_cnt < 0) {
11106			err = pb->cpu_cnt;
11107			goto error;
11108		}
11109		if (map.max_entries && map.max_entries < pb->cpu_cnt)
11110			pb->cpu_cnt = map.max_entries;
11111	}
11112
11113	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
11114	if (!pb->events) {
11115		err = -ENOMEM;
11116		pr_warn("failed to allocate events: out of memory\n");
11117		goto error;
11118	}
11119	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
11120	if (!pb->cpu_bufs) {
11121		err = -ENOMEM;
11122		pr_warn("failed to allocate buffers: out of memory\n");
11123		goto error;
11124	}
11125
11126	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
11127	if (err) {
11128		pr_warn("failed to get online CPU mask: %d\n", err);
11129		goto error;
11130	}
11131
11132	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
11133		struct perf_cpu_buf *cpu_buf;
11134		int cpu, map_key;
11135
11136		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
11137		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
11138
		/* in case user didn't explicitly request particular CPUs to
		 * be attached to, skip offline/not present CPUs
		 */
11142		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
11143			continue;
11144
11145		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
11146		if (IS_ERR(cpu_buf)) {
11147			err = PTR_ERR(cpu_buf);
11148			goto error;
11149		}
11150
11151		pb->cpu_bufs[j] = cpu_buf;
11152
11153		err = bpf_map_update_elem(pb->map_fd, &map_key,
11154					  &cpu_buf->fd, 0);
11155		if (err) {
11156			err = -errno;
11157			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
11158				cpu, map_key, cpu_buf->fd,
11159				libbpf_strerror_r(err, msg, sizeof(msg)));
11160			goto error;
11161		}
11162
11163		pb->events[j].events = EPOLLIN;
11164		pb->events[j].data.ptr = cpu_buf;
11165		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
11166			      &pb->events[j]) < 0) {
11167			err = -errno;
11168			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
11169				cpu, cpu_buf->fd,
11170				libbpf_strerror_r(err, msg, sizeof(msg)));
11171			goto error;
11172		}
11173		j++;
11174	}
11175	pb->cpu_cnt = j;
11176	free(online);
11177
11178	return pb;
11179
11180error:
11181	free(online);
11182	if (pb)
11183		perf_buffer__free(pb);
11184	return ERR_PTR(err);
11185}
11186
11187struct perf_sample_raw {
11188	struct perf_event_header header;
11189	uint32_t size;
11190	char data[];
11191};
11192
11193struct perf_sample_lost {
11194	struct perf_event_header header;
11195	uint64_t id;
11196	uint64_t lost;
11197	uint64_t sample_id;
11198};
11199
11200static enum bpf_perf_event_ret
11201perf_buffer__process_record(struct perf_event_header *e, void *ctx)
11202{
11203	struct perf_cpu_buf *cpu_buf = ctx;
11204	struct perf_buffer *pb = cpu_buf->pb;
11205	void *data = e;
11206
11207	/* user wants full control over parsing perf event */
11208	if (pb->event_cb)
11209		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
11210
11211	switch (e->type) {
11212	case PERF_RECORD_SAMPLE: {
11213		struct perf_sample_raw *s = data;
11214
11215		if (pb->sample_cb)
11216			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
11217		break;
11218	}
11219	case PERF_RECORD_LOST: {
11220		struct perf_sample_lost *s = data;
11221
11222		if (pb->lost_cb)
11223			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
11224		break;
11225	}
11226	default:
11227		pr_warn("unknown perf sample type %d\n", e->type);
11228		return LIBBPF_PERF_EVENT_ERROR;
11229	}
11230	return LIBBPF_PERF_EVENT_CONT;
11231}
11232
11233static int perf_buffer__process_records(struct perf_buffer *pb,
11234					struct perf_cpu_buf *cpu_buf)
11235{
11236	enum bpf_perf_event_ret ret;
11237
11238	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
11239					 pb->page_size, &cpu_buf->buf,
11240					 &cpu_buf->buf_size,
11241					 perf_buffer__process_record, cpu_buf);
11242	if (ret != LIBBPF_PERF_EVENT_CONT)
11243		return ret;
11244	return 0;
11245}
11246
11247int perf_buffer__epoll_fd(const struct perf_buffer *pb)
11248{
11249	return pb->epoll_fd;
11250}
11251
11252int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
11253{
11254	int i, cnt, err;
11255
11256	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
11257	if (cnt < 0)
11258		return -errno;
11259
11260	for (i = 0; i < cnt; i++) {
11261		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
11262
11263		err = perf_buffer__process_records(pb, cpu_buf);
11264		if (err) {
11265			pr_warn("error while processing records: %d\n", err);
11266			return libbpf_err(err);
11267		}
11268	}
11269	return cnt;
11270}
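
/*
 * Instead of perf_buffer__poll(), the epoll FD exposed above can be
 * plugged into an application's own event loop; a minimal sketch:
 *
 *	struct epoll_event ev;
 *
 *	while (epoll_wait(perf_buffer__epoll_fd(pb), &ev, 1, -1) > 0) {
 *		if (perf_buffer__consume(pb))
 *			break;
 *	}
 */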
11271
11272/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
11273 * manager.
11274 */
11275size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
11276{
11277	return pb->cpu_cnt;
11278}
11279
11280/*
11281 * Return perf_event FD of a ring buffer in *buf_idx* slot of
11282 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
11283 * select()/poll()/epoll() Linux syscalls.
11284 */
11285int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
11286{
11287	struct perf_cpu_buf *cpu_buf;
11288
11289	if (buf_idx >= pb->cpu_cnt)
11290		return libbpf_err(-EINVAL);
11291
11292	cpu_buf = pb->cpu_bufs[buf_idx];
11293	if (!cpu_buf)
11294		return libbpf_err(-ENOENT);
11295
11296	return cpu_buf->fd;
11297}
11298
11299/*
11300 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
11301 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
11302 * consume, do nothing and return success.
11303 * Returns:
11304 *   - 0 on success;
11305 *   - <0 on failure.
11306 */
11307int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
11308{
11309	struct perf_cpu_buf *cpu_buf;
11310
11311	if (buf_idx >= pb->cpu_cnt)
11312		return libbpf_err(-EINVAL);
11313
11314	cpu_buf = pb->cpu_bufs[buf_idx];
11315	if (!cpu_buf)
11316		return libbpf_err(-ENOENT);
11317
11318	return perf_buffer__process_records(pb, cpu_buf);
11319}
11320
11321int perf_buffer__consume(struct perf_buffer *pb)
11322{
11323	int i, err;
11324
11325	for (i = 0; i < pb->cpu_cnt; i++) {
11326		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
11327
11328		if (!cpu_buf)
11329			continue;
11330
11331		err = perf_buffer__process_records(pb, cpu_buf);
11332		if (err) {
11333			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
11334			return libbpf_err(err);
11335		}
11336	}
11337	return 0;
11338}
11339
11340struct bpf_prog_info_array_desc {
11341	int	array_offset;	/* e.g. offset of jited_prog_insns */
11342	int	count_offset;	/* e.g. offset of jited_prog_len */
11343	int	size_offset;	/* > 0: offset of rec size,
11344				 * < 0: fix size of -size_offset
11345				 */
11346};
11347
11348static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
11349	[BPF_PROG_INFO_JITED_INSNS] = {
11350		offsetof(struct bpf_prog_info, jited_prog_insns),
11351		offsetof(struct bpf_prog_info, jited_prog_len),
11352		-1,
11353	},
11354	[BPF_PROG_INFO_XLATED_INSNS] = {
11355		offsetof(struct bpf_prog_info, xlated_prog_insns),
11356		offsetof(struct bpf_prog_info, xlated_prog_len),
11357		-1,
11358	},
11359	[BPF_PROG_INFO_MAP_IDS] = {
11360		offsetof(struct bpf_prog_info, map_ids),
11361		offsetof(struct bpf_prog_info, nr_map_ids),
11362		-(int)sizeof(__u32),
11363	},
11364	[BPF_PROG_INFO_JITED_KSYMS] = {
11365		offsetof(struct bpf_prog_info, jited_ksyms),
11366		offsetof(struct bpf_prog_info, nr_jited_ksyms),
11367		-(int)sizeof(__u64),
11368	},
11369	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
11370		offsetof(struct bpf_prog_info, jited_func_lens),
11371		offsetof(struct bpf_prog_info, nr_jited_func_lens),
11372		-(int)sizeof(__u32),
11373	},
11374	[BPF_PROG_INFO_FUNC_INFO] = {
11375		offsetof(struct bpf_prog_info, func_info),
11376		offsetof(struct bpf_prog_info, nr_func_info),
11377		offsetof(struct bpf_prog_info, func_info_rec_size),
11378	},
11379	[BPF_PROG_INFO_LINE_INFO] = {
11380		offsetof(struct bpf_prog_info, line_info),
11381		offsetof(struct bpf_prog_info, nr_line_info),
11382		offsetof(struct bpf_prog_info, line_info_rec_size),
11383	},
11384	[BPF_PROG_INFO_JITED_LINE_INFO] = {
11385		offsetof(struct bpf_prog_info, jited_line_info),
11386		offsetof(struct bpf_prog_info, nr_jited_line_info),
11387		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
11388	},
11389	[BPF_PROG_INFO_PROG_TAGS] = {
11390		offsetof(struct bpf_prog_info, prog_tags),
11391		offsetof(struct bpf_prog_info, nr_prog_tags),
11392		-(int)sizeof(__u8) * BPF_TAG_SIZE,
11393	},
11394
11395};
11396
11397static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
11398					   int offset)
11399{
11400	__u32 *array = (__u32 *)info;
11401
11402	if (offset >= 0)
11403		return array[offset / sizeof(__u32)];
11404	return -(int)offset;
11405}
11406
11407static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
11408					   int offset)
11409{
11410	__u64 *array = (__u64 *)info;
11411
11412	if (offset >= 0)
11413		return array[offset / sizeof(__u64)];
11414	return -(int)offset;
11415}
11416
11417static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
11418					 __u32 val)
11419{
11420	__u32 *array = (__u32 *)info;
11421
11422	if (offset >= 0)
11423		array[offset / sizeof(__u32)] = val;
11424}
11425
11426static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
11427					 __u64 val)
11428{
11429	__u64 *array = (__u64 *)info;
11430
11431	if (offset >= 0)
11432		array[offset / sizeof(__u64)] = val;
11433}
11434
11435struct bpf_prog_info_linear *
11436bpf_program__get_prog_info_linear(int fd, __u64 arrays)
11437{
11438	struct bpf_prog_info_linear *info_linear;
11439	struct bpf_prog_info info = {};
11440	__u32 info_len = sizeof(info);
11441	__u32 data_len = 0;
11442	int i, err;
11443	void *ptr;
11444
11445	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
11446		return libbpf_err_ptr(-EINVAL);
11447
11448	/* step 1: get array dimensions */
11449	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
11450	if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
11452		return libbpf_err_ptr(-EFAULT);
11453	}
11454
11455	/* step 2: calculate total size of all arrays */
11456	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11457		bool include_array = (arrays & (1UL << i)) > 0;
11458		struct bpf_prog_info_array_desc *desc;
11459		__u32 count, size;
11460
11461		desc = bpf_prog_info_array_desc + i;
11462
11463		/* kernel is too old to support this field */
11464		if (info_len < desc->array_offset + sizeof(__u32) ||
11465		    info_len < desc->count_offset + sizeof(__u32) ||
11466		    (desc->size_offset > 0 && info_len < desc->size_offset))
11467			include_array = false;
11468
11469		if (!include_array) {
11470			arrays &= ~(1UL << i);	/* clear the bit */
11471			continue;
11472		}
11473
11474		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11475		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11476
11477		data_len += count * size;
11478	}
11479
	/* step 3: allocate contiguous memory */
11481	data_len = roundup(data_len, sizeof(__u64));
11482	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
11483	if (!info_linear)
11484		return libbpf_err_ptr(-ENOMEM);
11485
11486	/* step 4: fill data to info_linear->info */
11487	info_linear->arrays = arrays;
11488	memset(&info_linear->info, 0, sizeof(info));
11489	ptr = info_linear->data;
11490
11491	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11492		struct bpf_prog_info_array_desc *desc;
11493		__u32 count, size;
11494
11495		if ((arrays & (1UL << i)) == 0)
11496			continue;
11497
11498		desc  = bpf_prog_info_array_desc + i;
11499		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11500		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11501		bpf_prog_info_set_offset_u32(&info_linear->info,
11502					     desc->count_offset, count);
11503		bpf_prog_info_set_offset_u32(&info_linear->info,
11504					     desc->size_offset, size);
11505		bpf_prog_info_set_offset_u64(&info_linear->info,
11506					     desc->array_offset,
11507					     ptr_to_u64(ptr));
11508		ptr += count * size;
11509	}
11510
11511	/* step 5: call syscall again to get required arrays */
11512	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
11513	if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
11515		free(info_linear);
11516		return libbpf_err_ptr(-EFAULT);
11517	}
11518
11519	/* step 6: verify the data */
11520	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11521		struct bpf_prog_info_array_desc *desc;
11522		__u32 v1, v2;
11523
11524		if ((arrays & (1UL << i)) == 0)
11525			continue;
11526
11527		desc = bpf_prog_info_array_desc + i;
11528		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11529		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11530						   desc->count_offset);
11531		if (v1 != v2)
11532			pr_warn("%s: mismatch in element count\n", __func__);
11533
11534		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11535		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11536						   desc->size_offset);
11537		if (v1 != v2)
11538			pr_warn("%s: mismatch in rec size\n", __func__);
11539	}
11540
11541	/* step 7: update info_len and data_len */
11542	info_linear->info_len = sizeof(struct bpf_prog_info);
11543	info_linear->data_len = data_len;
11544
11545	return info_linear;
11546}
11547
11548void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
11549{
11550	int i;
11551
11552	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11553		struct bpf_prog_info_array_desc *desc;
11554		__u64 addr, offs;
11555
11556		if ((info_linear->arrays & (1UL << i)) == 0)
11557			continue;
11558
11559		desc = bpf_prog_info_array_desc + i;
11560		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
11561						     desc->array_offset);
11562		offs = addr - ptr_to_u64(info_linear->data);
11563		bpf_prog_info_set_offset_u64(&info_linear->info,
11564					     desc->array_offset, offs);
11565	}
11566}
11567
11568void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
11569{
11570	int i;
11571
11572	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11573		struct bpf_prog_info_array_desc *desc;
11574		__u64 addr, offs;
11575
11576		if ((info_linear->arrays & (1UL << i)) == 0)
11577			continue;
11578
11579		desc = bpf_prog_info_array_desc + i;
11580		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
11581						     desc->array_offset);
11582		addr = offs + ptr_to_u64(info_linear->data);
11583		bpf_prog_info_set_offset_u64(&info_linear->info,
11584					     desc->array_offset, addr);
11585	}
11586}
11587
11588int bpf_program__set_attach_target(struct bpf_program *prog,
11589				   int attach_prog_fd,
11590				   const char *attach_func_name)
11591{
11592	int btf_obj_fd = 0, btf_id = 0, err;
11593
11594	if (!prog || attach_prog_fd < 0 || !attach_func_name)
11595		return libbpf_err(-EINVAL);
11596
11597	if (prog->obj->loaded)
11598		return libbpf_err(-EINVAL);
11599
11600	if (attach_prog_fd) {
11601		btf_id = libbpf_find_prog_btf_id(attach_func_name,
11602						 attach_prog_fd);
11603		if (btf_id < 0)
11604			return libbpf_err(btf_id);
11605	} else {
11606		/* load btf_vmlinux, if not yet */
11607		err = bpf_object__load_vmlinux_btf(prog->obj, true);
11608		if (err)
11609			return libbpf_err(err);
11610		err = find_kernel_btf_id(prog->obj, attach_func_name,
11611					 prog->expected_attach_type,
11612					 &btf_obj_fd, &btf_id);
11613		if (err)
11614			return libbpf_err(err);
11615	}
11616
11617	prog->attach_btf_id = btf_id;
11618	prog->attach_btf_obj_fd = btf_obj_fd;
11619	prog->attach_prog_fd = attach_prog_fd;
11620	return 0;
11621}
11622
11623int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
11624{
11625	int err = 0, n, len, start, end = -1;
11626	bool *tmp;
11627
11628	*mask = NULL;
11629	*mask_sz = 0;
11630
	/* Each substring separated by ',' has the format \d+-\d+ or \d+ */
11632	while (*s) {
11633		if (*s == ',' || *s == '\n') {
11634			s++;
11635			continue;
11636		}
11637		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
11638		if (n <= 0 || n > 2) {
11639			pr_warn("Failed to get CPU range %s: %d\n", s, n);
11640			err = -EINVAL;
11641			goto cleanup;
11642		} else if (n == 1) {
11643			end = start;
11644		}
11645		if (start < 0 || start > end) {
11646			pr_warn("Invalid CPU range [%d,%d] in %s\n",
11647				start, end, s);
11648			err = -EINVAL;
11649			goto cleanup;
11650		}
11651		tmp = realloc(*mask, end + 1);
11652		if (!tmp) {
11653			err = -ENOMEM;
11654			goto cleanup;
11655		}
11656		*mask = tmp;
11657		memset(tmp + *mask_sz, 0, start - *mask_sz);
11658		memset(tmp + start, 1, end - start + 1);
11659		*mask_sz = end + 1;
11660		s += len;
11661	}
11662	if (!*mask_sz) {
11663		pr_warn("Empty CPU range\n");
11664		return -EINVAL;
11665	}
11666	return 0;
11667cleanup:
11668	free(*mask);
11669	*mask = NULL;
11670	return err;
11671}
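
/*
 * Example of the accepted format: "0-3,5" yields a 6-entry mask with
 * entries 0..3 and 5 set and entry 4 clear; the caller owns the returned
 * memory.
 *
 *	bool *mask;
 *	int sz;
 *
 *	if (!parse_cpu_mask_str("0-3,5", &mask, &sz))
 *		free(mask);	(sz == 6 here)
 */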
11672
11673int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
11674{
11675	int fd, err = 0, len;
11676	char buf[128];
11677
11678	fd = open(fcpu, O_RDONLY);
11679	if (fd < 0) {
11680		err = -errno;
11681		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
11682		return err;
11683	}
11684	len = read(fd, buf, sizeof(buf));
11685	close(fd);
11686	if (len <= 0) {
11687		err = len ? -errno : -EINVAL;
11688		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
11689		return err;
11690	}
11691	if (len >= sizeof(buf)) {
11692		pr_warn("CPU mask is too big in file %s\n", fcpu);
11693		return -E2BIG;
11694	}
11695	buf[len] = '\0';
11696
11697	return parse_cpu_mask_str(buf, mask, mask_sz);
11698}
11699
11700int libbpf_num_possible_cpus(void)
11701{
11702	static const char *fcpu = "/sys/devices/system/cpu/possible";
11703	static int cpus;
11704	int err, n, i, tmp_cpus;
11705	bool *mask;
11706
11707	tmp_cpus = READ_ONCE(cpus);
11708	if (tmp_cpus > 0)
11709		return tmp_cpus;
11710
11711	err = parse_cpu_mask_file(fcpu, &mask, &n);
11712	if (err)
11713		return libbpf_err(err);
11714
11715	tmp_cpus = 0;
11716	for (i = 0; i < n; i++) {
11717		if (mask[i])
11718			tmp_cpus++;
11719	}
11720	free(mask);
11721
11722	WRITE_ONCE(cpus, tmp_cpus);
11723	return tmp_cpus;
11724}
11725
11726int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
11727			      const struct bpf_object_open_opts *opts)
11728{
11729	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
11730		.object_name = s->name,
11731	);
11732	struct bpf_object *obj;
11733	int i, err;
11734
	/* Attempt to preserve opts->object_name, unless overridden by user
11736	 * explicitly. Overwriting object name for skeletons is discouraged,
11737	 * as it breaks global data maps, because they contain object name
11738	 * prefix as their own map name prefix. When skeleton is generated,
11739	 * bpftool is making an assumption that this name will stay the same.
11740	 */
11741	if (opts) {
11742		memcpy(&skel_opts, opts, sizeof(*opts));
11743		if (!opts->object_name)
11744			skel_opts.object_name = s->name;
11745	}
11746
11747	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
11748	err = libbpf_get_error(obj);
11749	if (err) {
11750		pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
11751			s->name, err);
11752		return libbpf_err(err);
11753	}
11754
11755	*s->obj = obj;
11756
11757	for (i = 0; i < s->map_cnt; i++) {
11758		struct bpf_map **map = s->maps[i].map;
11759		const char *name = s->maps[i].name;
11760		void **mmaped = s->maps[i].mmaped;
11761
11762		*map = bpf_object__find_map_by_name(obj, name);
11763		if (!*map) {
11764			pr_warn("failed to find skeleton map '%s'\n", name);
11765			return libbpf_err(-ESRCH);
11766		}
11767
11768		/* externs shouldn't be pre-setup from user code */
11769		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
11770			*mmaped = (*map)->mmaped;
11771	}
11772
11773	for (i = 0; i < s->prog_cnt; i++) {
11774		struct bpf_program **prog = s->progs[i].prog;
11775		const char *name = s->progs[i].name;
11776
11777		*prog = bpf_object__find_program_by_name(obj, name);
11778		if (!*prog) {
11779			pr_warn("failed to find skeleton program '%s'\n", name);
11780			return libbpf_err(-ESRCH);
11781		}
11782	}
11783
11784	return 0;
11785}
11786
11787int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
11788{
11789	int i, err;
11790
11791	err = bpf_object__load(*s->obj);
11792	if (err) {
11793		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
11794		return libbpf_err(err);
11795	}
11796
11797	for (i = 0; i < s->map_cnt; i++) {
11798		struct bpf_map *map = *s->maps[i].map;
11799		size_t mmap_sz = bpf_map_mmap_sz(map);
11800		int prot, map_fd = bpf_map__fd(map);
11801		void **mmaped = s->maps[i].mmaped;
11802
11803		if (!mmaped)
11804			continue;
11805
11806		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
11807			*mmaped = NULL;
11808			continue;
11809		}
11810
11811		if (map->def.map_flags & BPF_F_RDONLY_PROG)
11812			prot = PROT_READ;
11813		else
11814			prot = PROT_READ | PROT_WRITE;
11815
		/* Remap the anonymous mmap()-ed "map initialization image"
		 * as BPF map-backed mmap()-ed memory, preserving the same
		 * memory address. This will cause the kernel to change the
		 * process' page table to point to a different piece of
		 * kernel memory, but from the userspace point of view the
		 * memory address (and its contents, identical at this point)
		 * will stay the same. This mapping will be released by
		 * bpf_object__close() as part of the normal clean up
		 * procedure, so we don't need to worry about it from the
		 * skeleton's clean up perspective.
		 */
11826		*mmaped = mmap(map->mmaped, mmap_sz, prot,
11827				MAP_SHARED | MAP_FIXED, map_fd, 0);
11828		if (*mmaped == MAP_FAILED) {
11829			err = -errno;
11830			*mmaped = NULL;
11831			pr_warn("failed to re-mmap() map '%s': %d\n",
11832				 bpf_map__name(map), err);
11833			return libbpf_err(err);
11834		}
11835	}
11836
11837	return 0;
11838}
11839
11840int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
11841{
11842	int i, err;
11843
11844	for (i = 0; i < s->prog_cnt; i++) {
11845		struct bpf_program *prog = *s->progs[i].prog;
11846		struct bpf_link **link = s->progs[i].link;
11847		const struct bpf_sec_def *sec_def;
11848
11849		if (!prog->load)
11850			continue;
11851
11852		sec_def = find_sec_def(prog->sec_name);
11853		if (!sec_def || !sec_def->attach_fn)
11854			continue;
11855
11856		*link = sec_def->attach_fn(sec_def, prog);
11857		err = libbpf_get_error(*link);
11858		if (err) {
11859			pr_warn("failed to auto-attach program '%s': %d\n",
11860				bpf_program__name(prog), err);
11861			return libbpf_err(err);
11862		}
11863	}
11864
11865	return 0;
11866}
11867
11868void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
11869{
11870	int i;
11871
11872	for (i = 0; i < s->prog_cnt; i++) {
11873		struct bpf_link **link = s->progs[i].link;
11874
11875		bpf_link__destroy(*link);
11876		*link = NULL;
11877	}
11878}
11879
11880void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
11881{
11882	if (s->progs)
11883		bpf_object__detach_skeleton(s);
11884	if (s->obj)
11885		bpf_object__close(*s->obj);
11886	free(s->maps);
11887	free(s->progs);
11888	free(s);
11889}
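
/*
 * The skeleton helpers above are normally driven through wrappers emitted
 * by "bpftool gen skeleton"; a sketch of that lifecycle (the my_prog__*
 * names are hypothetical generated code):
 *
 *	struct my_prog *skel = my_prog__open();	(bpf_object__open_skeleton)
 *	err = my_prog__load(skel);		(bpf_object__load_skeleton)
 *	if (!err)
 *		err = my_prog__attach(skel);	(bpf_object__attach_skeleton)
 *	my_prog__destroy(skel);			(bpf_object__destroy_skeleton)
 */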