   1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
   2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
   3#include <ctype.h>
   4#include <stdio.h>
   5#include <stdlib.h>
   6#include <string.h>
   7#include <libelf.h>
   8#include <gelf.h>
   9#include <unistd.h>
  10#include <linux/ptrace.h>
  11#include <linux/kernel.h>
  12
   13/* 's8' is a register name on RISC-V and gets marked as poison, so alias it as rv_s8 up front */
  14#if defined(__riscv)
  15#define rv_s8 s8
  16#endif
  17
  18#include "bpf.h"
  19#include "libbpf.h"
  20#include "libbpf_common.h"
  21#include "libbpf_internal.h"
  22#include "hashmap.h"
  23#include "str_error.h"
  24
  25/* libbpf's USDT support consists of BPF-side state/code and user-space
   26 * state/code working together in concert. The BPF-side parts are defined in
   27 * the usdt.bpf.h header library. User-space state is encapsulated by struct
  28 * usdt_manager and all the supporting code centered around usdt_manager.
  29 *
   30 * usdt.bpf.h defines two BPF maps that usdt_manager expects: the USDT spec
   31 * map and the IP-to-spec-ID map, an auxiliary map necessary for kernels that
   32 * don't support BPF cookies (see below). These two maps are implicitly
   33 * embedded into the user's final BPF object file when the user's code includes
   34 * usdt.bpf.h. This means that libbpf doesn't do anything special to create
   35 * these USDT support maps. They are created by the normal libbpf logic of
   36 * instantiating BPF maps when opening and loading the BPF object.
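 *
 * For reference, those maps are roughly of the following shape (a sketch;
 * see usdt.bpf.h for the authoritative definitions and exact sizing macros):
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_ARRAY);            // spec ID -> USDT spec
 *       __type(key, int);
 *       __type(value, struct __bpf_usdt_spec);
 *   } __bpf_usdt_specs SEC(".maps");
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_HASH);             // absolute IP -> spec ID
 *       __type(key, long);
 *       __type(value, __u32);
 *   } __bpf_usdt_ip_to_spec_id SEC(".maps");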
  37 *
  38 * As such, libbpf is basically unaware of the need to do anything
  39 * USDT-related until the very first call to bpf_program__attach_usdt(), which
  40 * can be called by user explicitly or happen automatically during skeleton
  41 * attach (or, equivalently, through generic bpf_program__attach() call). At
  42 * this point, libbpf will instantiate and initialize struct usdt_manager and
   43 * store it in bpf_object. USDT manager is a per-BPF-object construct, as each
   44 * independent BPF object might or might not have USDT programs, and thus might
   45 * or might not need USDT-related state. There is no coordination between two
   46 * bpf_objects when it comes to USDT attachment; they are oblivious of each
   47 * other's existence, and libbpf simply deals with bpf_object-specific USDT
   48 * state.
  49 *
  50 * Quick crash course on USDTs.
  51 *
   52 * From a user-space application's point of view, a USDT is essentially just
   53 * a slightly special function call that normally has zero overhead, unless it
   54 * is being traced by some external entity (e.g., a BPF-based tool). Here's how
   55 * a typical application can trigger a USDT probe:
  56 *
  57 * #include <sys/sdt.h>  // provided by systemtap-sdt-devel package
   58 * // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
  59 *
  60 * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
  61 *
   62 * A USDT is identified by its <provider-name>:<probe-name> pair of names. Each
   63 * individual USDT has a fixed number of arguments (3 in the above example)
   64 * and specifies the value of each argument as if it were a function call.
  65 *
   66 * A USDT call is actually not a function call, but is instead replaced by
   67 * a single NOP instruction (thus zero overhead, effectively). In addition
   68 * to that, the USDT macros generate special SHT_NOTE ELF records in the
   69 * .note.stapsdt ELF section. Here's an example USDT definition as emitted by
  70 * `readelf -n <binary>`:
  71 *
  72 *   stapsdt              0x00000089       NT_STAPSDT (SystemTap probe descriptors)
  73 *   Provider: test
  74 *   Name: usdt12
  75 *   Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
  76 *   Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
  77 *
  78 * In this case we have USDT test:usdt12 with 12 arguments.
  79 *
   80 * Location and base are offsets used to calculate the absolute IP address of
   81 * the NOP instruction, which the kernel can replace with a breakpoint
   82 * instruction to trigger instrumentation code (a BPF program, in our case).
  83 *
   84 * The semaphore above is an optional feature. It records the address of
   85 * a 2-byte refcount variable (normally in the '.probes' ELF section) used to
   86 * signal whether anything is attached to the USDT. This is useful for user
   87 * applications if, for example, they need to prepare some arguments that are
   88 * passed only to USDTs and that preparation is expensive. By checking whether
   89 * the USDT is "activated", an application can avoid paying those costs
   90 * unnecessarily. Recent enough kernels have built-in support for automatically
   91 * managing this refcount, which libbpf expects and relies on. If a USDT is
   92 * defined without an associated semaphore, this value will be zero. See
   93 * selftests for semaphore examples.
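 *
 * For illustration only (this is not how this file uses the semaphore), an
 * application might guard expensive argument preparation with the semaphore;
 * the names below are hypothetical:
 *
 *   extern unsigned short my_probe_semaphore;    // placed in '.probes' section
 *
 *   if (my_probe_semaphore) {                    // is anything attached?
 *           char *msg = expensive_format();      // hypothetical helper
 *           STAP_PROBE1(my_usdt_provider, my_usdt_probe_name, msg);
 *   }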
  94 *
   95 * Arguments is the most interesting part. This USDT specification string
   96 * provides information about all the USDT arguments and their locations. The
   97 * part before the @ sign defines the byte size of the argument (1, 2, 4, or 8)
   98 * and whether the argument is signed or unsigned (negative size means signed).
   99 * The part after the @ sign is an assembly-like definition of the argument's
  100 * location (see [0] for more details). Technically, the assembler can emit
  101 * some pretty advanced definitions, but libbpf currently supports the three
  102 * most common cases:
 103 *   1) immediate constant, see 5th and 9th args above (-4@$5 and -4@-9);
 104 *   2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
 105 *      whose value is in register %rdx";
 106 *   3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
 107 *      specifies signed 32-bit integer stored at offset -1204 bytes from
 108 *      memory address stored in %rbp.
 109 *
 110 *   [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
 111 *
 112 * During attachment, libbpf parses all the relevant USDT specifications and
 113 * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
  114 * code through the spec map. This allows BPF applications to quickly fetch
  115 * actual argument values at runtime using simple BPF-side code.
 116 *
 117 * With basics out of the way, let's go over less immediately obvious aspects
 118 * of supporting USDTs.
 119 *
  120 * First, there is no special USDT BPF program type. It is actually just
  121 * a uprobe BPF program (which, for the kernel, at least currently, is just
  122 * a kprobe program, i.e., the BPF_PROG_TYPE_KPROBE program type). The only
  123 * difference is that a uprobe is usually attached at the function entry, while
  124 * a USDT will normally be somewhere inside the function. But it should always
  125 * point to a NOP instruction, which makes such uprobes the fastest uprobe
  126 * kind.
 127 *
  128 * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
  129 * macro invocations can end up being inlined many times, depending on the
  130 * specifics of each individual user application. So a single conceptual USDT
  131 * (identified by its provider:name pair of identifiers) is, generally speaking,
  132 * multiple uprobe locations (USDT call sites) in different places in the user
  133 * application. Further, again due to inlining, each USDT call site might end
  134 * up having the same argument #N located in a different place. In one call
  135 * site it could be a constant, in another it will end up in a register, and in
  136 * yet another it could be some other register or even somewhere on the stack.
 137 *
  138 * As such, "attaching to a USDT" means (in the general case) attaching the
  139 * same uprobe BPF program to multiple target locations in the user application,
  140 * each potentially having a completely different USDT spec associated with it.
  141 * To wire all this up together, libbpf allocates a unique integer spec ID for
  142 * each unique USDT spec. Spec IDs are allocated as sequential small integers
  143 * so that they can be used as keys in an array BPF map (for performance).
  144 * Spec ID allocation and accounting is a big part of what usdt_manager is
  145 * about. This state has to be maintained per BPF object and coordinated
  146 * between different USDT attachments within the same BPF object.
 147 *
  148 * The spec ID is the key in the spec BPF map, and the value is the actual USDT
  149 * spec laid out as struct usdt_spec. Each invocation of the BPF program at
  150 * runtime needs to know its associated spec ID. It gets it either through the
  151 * BPF cookie, which libbpf sets to the spec ID at attach time, or, if the
  152 * kernel is too old to support BPF cookies, through the IP-to-spec-ID map that
  153 * libbpf maintains in that case. The latter means that some modes of operation
  154 * can't be supported without BPF cookies. One such mode is attaching to a
  155 * shared library "generically", without specifying a target process. In that
  156 * case, it's impossible to calculate absolute IP addresses for the
  157 * IP-to-spec-ID map, and thus such a mode requires BPF cookie support.
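 *
 * On the BPF side, the spec ID resolution implemented in usdt.bpf.h is
 * conceptually the following (a simplified sketch, not a verbatim copy of
 * that header):
 *
 *   if (kernel supports BPF cookie)
 *           spec_id = bpf_get_attach_cookie(ctx);
 *   else
 *           spec_id = lookup(__bpf_usdt_ip_to_spec_id, PT_REGS_IP(ctx));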
 158 *
  159 * Note that libbpf uses BPF cookie functionality for its own internal needs,
  160 * so the user can't rely on the BPF cookie feature directly. To that end,
  161 * libbpf provides conceptually equivalent USDT cookie support. It's still
  162 * a u64 user-provided value that can be associated with a USDT attachment.
  163 * Note that this will be the same value for all USDT call sites within the
  164 * same single *logical* USDT attachment. This makes sense because, to the
  165 * user, attaching to a USDT is a single BPF program triggered for a single
  166 * USDT probe. The fact that this is done at multiple actual locations is
  167 * a mostly hidden implementation detail. The USDT cookie value can be fetched
  168 * with the bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
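 *
 * A minimal BPF-side handler might look roughly like this (a sketch; see
 * usdt.bpf.h and selftests for the authoritative API, handler and argument
 * names here are made up):
 *
 *   SEC("usdt")
 *   int BPF_USDT(my_handler, int arg1, long arg2)
 *   {
 *           __u64 my_cookie = bpf_usdt_cookie(ctx);
 *
 *           // ... use my_cookie, arg1, arg2 ...
 *           return 0;
 *   }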
 169 *
  170 * Lastly, while a single USDT can have tons of USDT call sites, it doesn't
  171 * necessarily have that many different USDT specs. It very well might be
  172 * that 1000 USDT call sites only need 5 different USDT specs, because all the
  173 * arguments are typically contained in a small set of registers or stack
  174 * locations. As such, it's wasteful to allocate as many USDT spec IDs as
  175 * there are USDT call sites. So libbpf tries to be frugal and performs
  176 * on-the-fly deduplication during a single USDT attachment to only allocate
  177 * the minimal required number of unique USDT specs (and thus spec IDs). This
  178 * is trivially achieved by using the USDT spec string (the Arguments string
  179 * from the USDT note) as a lookup key in a hashmap. The USDT spec string
  180 * uniquely defines everything about how to fetch USDT arguments, so two USDT
  181 * call sites sharing a USDT spec string can safely share the same USDT spec
  182 * and spec ID. Note, this spec string deduplication happens only within the
  183 * same USDT attachment, so each USDT spec shares the same USDT cookie value.
  184 * This is not generally true for other USDT attachments within the same BPF
  185 * object, as even if the USDT spec string is the same, the USDT cookie value
  186 * can be different. It was deemed excessive to try to deduplicate across
  187 * independent USDT attachments by taking into account USDT spec string *and*
  188 * USDT cookie value, which would complicate spec ID accounting significantly
  189 * for little gain.
 190 */
 191
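/* A typical user-space attachment, for illustration (skeleton setup and error
 * handling are omitted; program, provider, and binary names are hypothetical):
 *
 *   LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);
 *   struct bpf_link *link;
 *
 *   link = bpf_program__attach_usdt(skel->progs.my_handler, -1, "/usr/bin/myapp",
 *                                   "my_usdt_provider", "my_usdt_probe_name",
 *                                   &opts);
 *
 * pid -1 means "any process"; a NULL result with errno set indicates failure.
 */
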
 192#define USDT_BASE_SEC ".stapsdt.base"
 193#define USDT_SEMA_SEC ".probes"
 194#define USDT_NOTE_SEC  ".note.stapsdt"
 195#define USDT_NOTE_TYPE 3
 196#define USDT_NOTE_NAME "stapsdt"
 197
 198/* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
 199enum usdt_arg_type {
 200	USDT_ARG_CONST,
 201	USDT_ARG_REG,
 202	USDT_ARG_REG_DEREF,
 203};
 204
 205/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
 206struct usdt_arg_spec {
 207	__u64 val_off;
 208	enum usdt_arg_type arg_type;
 209	short reg_off;
 210	bool arg_signed;
 211	char arg_bitshift;
 212};
 213
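/* For illustration, the x86-64 arg spec "-4@-1204(%rbp)" from the example
 * above would be encoded roughly as follows (a sketch; the exact reg_off
 * value depends on the struct pt_regs layout):
 *
 *   struct usdt_arg_spec ex = {
 *       .val_off      = (__u64)-1204,                  // dereference offset
 *       .arg_type     = USDT_ARG_REG_DEREF,            // read at %rbp - 1204
 *       .reg_off      = offsetof(struct pt_regs, rbp), // where to find %rbp
 *       .arg_signed   = true,                          // "-4" means signed
 *       .arg_bitshift = 64 - 4 * 8,                    // sign-extend 4 bytes
 *   };
 */
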
 214/* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
 215#define USDT_MAX_ARG_CNT 12
 216
 217/* should match struct __bpf_usdt_spec from usdt.bpf.h */
 218struct usdt_spec {
 219	struct usdt_arg_spec args[USDT_MAX_ARG_CNT];
 220	__u64 usdt_cookie;
 221	short arg_cnt;
 222};
 223
 224struct usdt_note {
 225	const char *provider;
 226	const char *name;
 227	/* USDT args specification string, e.g.:
 228	 * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
 229	 */
 230	const char *args;
 231	long loc_addr;
 232	long base_addr;
 233	long sema_addr;
 234};
 235
 236struct usdt_target {
 237	long abs_ip;
 238	long rel_ip;
 239	long sema_off;
 240	struct usdt_spec spec;
 241	const char *spec_str;
 242};
 243
 244struct usdt_manager {
 245	struct bpf_map *specs_map;
 246	struct bpf_map *ip_to_spec_id_map;
 247
 248	int *free_spec_ids;
 249	size_t free_spec_cnt;
 250	size_t next_free_spec_id;
 251
 252	bool has_bpf_cookie;
 253	bool has_sema_refcnt;
 254	bool has_uprobe_multi;
 255};
 256
 257struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
 258{
 259	static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
 260	struct usdt_manager *man;
 261	struct bpf_map *specs_map, *ip_to_spec_id_map;
 262
 263	specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
 264	ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
 265	if (!specs_map || !ip_to_spec_id_map) {
 266		pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
 267		return ERR_PTR(-ESRCH);
 268	}
 269
 270	man = calloc(1, sizeof(*man));
 271	if (!man)
 272		return ERR_PTR(-ENOMEM);
 273
 274	man->specs_map = specs_map;
 275	man->ip_to_spec_id_map = ip_to_spec_id_map;
 276
 277	/* Detect if BPF cookie is supported for kprobes.
 278	 * We don't need IP-to-ID mapping if we can use BPF cookies.
 279	 * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
 280	 */
 281	man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);
 282
 283	/* Detect kernel support for automatic refcounting of USDT semaphore.
 284	 * If this is not supported, USDTs with semaphores will not be supported.
 285	 * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
 286	 */
 287	man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
 288
 289	/*
 290	 * Detect kernel support for uprobe multi link to be used for attaching
 291	 * usdt probes.
 292	 */
 293	man->has_uprobe_multi = kernel_supports(obj, FEAT_UPROBE_MULTI_LINK);
 294	return man;
 295}
 296
 297void usdt_manager_free(struct usdt_manager *man)
 298{
 299	if (IS_ERR_OR_NULL(man))
 300		return;
 301
 302	free(man->free_spec_ids);
 303	free(man);
 304}
 305
 306static int sanity_check_usdt_elf(Elf *elf, const char *path)
 307{
 308	GElf_Ehdr ehdr;
 309	int endianness;
 310
 311	if (elf_kind(elf) != ELF_K_ELF) {
 312		pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
 313		return -EBADF;
 314	}
 315
 316	switch (gelf_getclass(elf)) {
 317	case ELFCLASS64:
 318		if (sizeof(void *) != 8) {
 319			pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
 320			return -EBADF;
 321		}
 322		break;
 323	case ELFCLASS32:
 324		if (sizeof(void *) != 4) {
 325			pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
 326			return -EBADF;
 327		}
 328		break;
 329	default:
 330		pr_warn("usdt: unsupported ELF class for '%s'\n", path);
 331		return -EBADF;
 332	}
 333
 334	if (!gelf_getehdr(elf, &ehdr))
 335		return -EINVAL;
 336
 337	if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
 338		pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
 339			path, ehdr.e_type);
 340		return -EBADF;
 341	}
 342
 343#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 344	endianness = ELFDATA2LSB;
 345#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 346	endianness = ELFDATA2MSB;
 347#else
 348# error "Unrecognized __BYTE_ORDER__"
 349#endif
 350	if (endianness != ehdr.e_ident[EI_DATA]) {
 351		pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
 352		return -EBADF;
 353	}
 354
 355	return 0;
 356}
 357
 358static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
 359{
 360	Elf_Scn *sec = NULL;
 361	size_t shstrndx;
 362
 363	if (elf_getshdrstrndx(elf, &shstrndx))
 364		return -EINVAL;
 365
 366	/* check if ELF is corrupted and avoid calling elf_strptr if yes */
 367	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
 368		return -EINVAL;
 369
 370	while ((sec = elf_nextscn(elf, sec)) != NULL) {
 371		char *name;
 372
 373		if (!gelf_getshdr(sec, shdr))
 374			return -EINVAL;
 375
 376		name = elf_strptr(elf, shstrndx, shdr->sh_name);
 377		if (name && strcmp(sec_name, name) == 0) {
 378			*scn = sec;
 379			return 0;
 380		}
 381	}
 382
 383	return -ENOENT;
 384}
 385
 386struct elf_seg {
 387	long start;
 388	long end;
 389	long offset;
 390	bool is_exec;
 391};
 392
 393static int cmp_elf_segs(const void *_a, const void *_b)
 394{
 395	const struct elf_seg *a = _a;
 396	const struct elf_seg *b = _b;
 397
 398	return a->start < b->start ? -1 : 1;
 399}
 400
 401static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
 402{
 403	GElf_Phdr phdr;
 404	size_t n;
 405	int i, err;
 406	struct elf_seg *seg;
 407	void *tmp;
 408
 409	*seg_cnt = 0;
 410
 411	if (elf_getphdrnum(elf, &n)) {
 412		err = -errno;
 413		return err;
 414	}
 415
 416	for (i = 0; i < n; i++) {
 417		if (!gelf_getphdr(elf, i, &phdr)) {
 418			err = -errno;
 419			return err;
 420		}
 421
 422		pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
 423			 i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
 424			 (long)phdr.p_type, (long)phdr.p_flags);
 425		if (phdr.p_type != PT_LOAD)
 426			continue;
 427
 428		tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
 429		if (!tmp)
 430			return -ENOMEM;
 431
 432		*segs = tmp;
 433		seg = *segs + *seg_cnt;
 434		(*seg_cnt)++;
 435
 436		seg->start = phdr.p_vaddr;
 437		seg->end = phdr.p_vaddr + phdr.p_memsz;
 438		seg->offset = phdr.p_offset;
 439		seg->is_exec = phdr.p_flags & PF_X;
 440	}
 441
 442	if (*seg_cnt == 0) {
 443		pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
 444		return -ESRCH;
 445	}
 446
 447	qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
 448	return 0;
 449}
 450
 451static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
 452{
 453	char path[PATH_MAX], line[PATH_MAX], mode[16];
 454	size_t seg_start, seg_end, seg_off;
 455	struct elf_seg *seg;
 456	int tmp_pid, i, err;
 457	FILE *f;
 458
 459	*seg_cnt = 0;
 460
 461	/* Handle containerized binaries only accessible from
 462	 * /proc/<pid>/root/<path>. They will be reported as just /<path> in
 463	 * /proc/<pid>/maps.
 464	 */
 465	if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
 466		goto proceed;
 467
 468	if (!realpath(lib_path, path)) {
 469		pr_warn("usdt: failed to get absolute path of '%s' (err %s), using path as is...\n",
 470			lib_path, errstr(-errno));
 471		libbpf_strlcpy(path, lib_path, sizeof(path));
 472	}
 473
 474proceed:
 475	sprintf(line, "/proc/%d/maps", pid);
 476	f = fopen(line, "re");
 477	if (!f) {
 478		err = -errno;
 479		pr_warn("usdt: failed to open '%s' to get base addr of '%s': %s\n",
 480			line, lib_path, errstr(err));
 481		return err;
 482	}
 483
 484	/* We need to handle lines with no path at the end:
 485	 *
 486	 * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613      /usr/lib64/libc-2.17.so
 487	 * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
 488	 * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598    /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
 489	 */
 490	while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
 491		      &seg_start, &seg_end, mode, &seg_off, line) == 5) {
 492		void *tmp;
 493
  494		/* to handle the no-path case (see above) we need to capture the
  495		 * line without skipping any whitespace, so we have to strip
  496		 * leading whitespace manually here
 497		 */
 498		i = 0;
 499		while (isblank(line[i]))
 500			i++;
 501		if (strcmp(line + i, path) != 0)
 502			continue;
 503
 504		pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
 505			 path, seg_start, seg_end, mode, seg_off);
 506
 507		/* ignore non-executable sections for shared libs */
 508		if (mode[2] != 'x')
 509			continue;
 510
 511		tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
 512		if (!tmp) {
 513			err = -ENOMEM;
 514			goto err_out;
 515		}
 516
 517		*segs = tmp;
 518		seg = *segs + *seg_cnt;
 519		*seg_cnt += 1;
 520
 521		seg->start = seg_start;
 522		seg->end = seg_end;
 523		seg->offset = seg_off;
 524		seg->is_exec = true;
 525	}
 526
 527	if (*seg_cnt == 0) {
 528		pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
 529			lib_path, path, pid);
 530		err = -ESRCH;
 531		goto err_out;
 532	}
 533
 534	qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
 535	err = 0;
 536err_out:
 537	fclose(f);
 538	return err;
 539}
 540
 541static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
 542{
 543	struct elf_seg *seg;
 544	int i;
 545
 546	/* for ELF binaries (both executables and shared libraries), we are
 547	 * given virtual address (absolute for executables, relative for
 548	 * libraries) which should match address range of [seg_start, seg_end)
 549	 */
 550	for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
 551		if (seg->start <= virtaddr && virtaddr < seg->end)
 552			return seg;
 553	}
 554	return NULL;
 555}
 556
 557static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
 558{
 559	struct elf_seg *seg;
 560	int i;
 561
  562	/* for VMA segments from the /proc/<pid>/maps file, the provided "address"
  563	 * is actually a file offset, so it should fall within the logical
  564	 * offset-based range of [offset_start, offset_end)
 565	 */
 566	for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
 567		if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
 568			return seg;
 569	}
 570	return NULL;
 571}
 572
 573static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
 574			   const char *data, size_t name_off, size_t desc_off,
 575			   struct usdt_note *usdt_note);
 576
 577static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
 578
 579static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
 580				const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
 581				struct usdt_target **out_targets, size_t *out_target_cnt)
 582{
 583	size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
 584	struct elf_seg *segs = NULL, *vma_segs = NULL;
 585	struct usdt_target *targets = NULL, *target;
 586	long base_addr = 0;
 587	Elf_Scn *notes_scn, *base_scn;
 588	GElf_Shdr base_shdr, notes_shdr;
 589	GElf_Ehdr ehdr;
 590	GElf_Nhdr nhdr;
 591	Elf_Data *data;
 592	int err;
 593
 594	*out_targets = NULL;
 595	*out_target_cnt = 0;
 596
 597	err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn);
 598	if (err) {
 599		pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
 600		return err;
 601	}
 602
 603	if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
 604		pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
 605		return -EINVAL;
 606	}
 607
 608	err = parse_elf_segs(elf, path, &segs, &seg_cnt);
 609	if (err) {
 610		pr_warn("usdt: failed to process ELF program segments for '%s': %s\n",
 611			path, errstr(err));
 612		goto err_out;
 613	}
 614
 615	/* .stapsdt.base ELF section is optional, but is used for prelink
 616	 * offset compensation (see a big comment further below)
 617	 */
 618	if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
 619		base_addr = base_shdr.sh_addr;
 620
 621	data = elf_getdata(notes_scn, 0);
 622	off = 0;
 623	while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
 624		long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
 625		struct usdt_note note;
 626		struct elf_seg *seg = NULL;
 627		void *tmp;
 628
 629		err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, &note);
 630		if (err)
 631			goto err_out;
 632
 633		if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
 634			continue;
 635
 636		/* We need to compensate "prelink effect". See [0] for details,
 637		 * relevant parts quoted here:
 638		 *
 639		 * Each SDT probe also expands into a non-allocated ELF note. You can
 640		 * find this by looking at SHT_NOTE sections and decoding the format;
 641		 * see below for details. Because the note is non-allocated, it means
 642		 * there is no runtime cost, and also preserved in both stripped files
 643		 * and .debug files.
 644		 *
 645		 * However, this means that prelink won't adjust the note's contents
 646		 * for address offsets. Instead, this is done via the .stapsdt.base
 647		 * section. This is a special section that is added to the text. We
 648		 * will only ever have one of these sections in a final link and it
 649		 * will only ever be one byte long. Nothing about this section itself
 650		 * matters, we just use it as a marker to detect prelink address
 651		 * adjustments.
 652		 *
 653		 * Each probe note records the link-time address of the .stapsdt.base
 654		 * section alongside the probe PC address. The decoder compares the
 655		 * base address stored in the note with the .stapsdt.base section's
 656		 * sh_addr. Initially these are the same, but the section header will
 657		 * be adjusted by prelink. So the decoder applies the difference to
 658		 * the probe PC address to get the correct prelinked PC address; the
 659		 * same adjustment is applied to the semaphore address, if any.
 660		 *
 661		 *   [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
 662		 */
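		/* For example (hypothetical numbers): if the note records
		 * loc_addr 0x549df3 and base_addr 0x8effa4, but prelink moved
		 * things so that .stapsdt.base's sh_addr is now 0x8f0fa4, then
		 * the adjusted address is 0x549df3 + (0x8f0fa4 - 0x8effa4) =
		 * 0x54adf3.
		 */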
 663		usdt_abs_ip = note.loc_addr;
 664		if (base_addr && note.base_addr)
 665			usdt_abs_ip += base_addr - note.base_addr;
 666
  667		/* When attaching uprobes (which is what USDTs basically are),
  668		 * the kernel expects a file offset to be specified, not a relative
  669		 * virtual address, so we need to translate the virtual address to
  670		 * a file offset, for both ET_EXEC and ET_DYN binaries.
 671		 */
 672		seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
 673		if (!seg) {
 674			err = -ESRCH;
 675			pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
 676				usdt_provider, usdt_name, path, usdt_abs_ip);
 677			goto err_out;
 678		}
 679		if (!seg->is_exec) {
 680			err = -ESRCH;
 681			pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
 682				path, seg->start, seg->end, usdt_provider, usdt_name,
 683				usdt_abs_ip);
 684			goto err_out;
 685		}
 686		/* translate from virtual address to file offset */
 687		usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
 688
 689		if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
 690			/* If we don't have BPF cookie support but need to
 691			 * attach to a shared library, we'll need to know and
 692			 * record absolute addresses of attach points due to
  693			 * the need to look up the USDT spec by the absolute IP
  694			 * of the triggered uprobe. Doing this resolution is only
 695			 * possible when we have a specific PID of the process
 696			 * that's using specified shared library. BPF cookie
 697			 * removes the absolute address limitation as we don't
 698			 * need to do this lookup (we just use BPF cookie as
 699			 * an index of USDT spec), so for newer kernels with
 700			 * BPF cookie support libbpf supports USDT attachment
 701			 * to shared libraries with no PID filter.
 702			 */
 703			if (pid < 0) {
 704				pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
 705				err = -ENOTSUP;
 706				goto err_out;
 707			}
 708
 709			/* vma_segs are lazily initialized only if necessary */
 710			if (vma_seg_cnt == 0) {
 711				err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
 712				if (err) {
 713					pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %s\n",
 714						pid, path, errstr(err));
 715					goto err_out;
 716				}
 717			}
 718
 719			seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
 720			if (!seg) {
 721				err = -ESRCH;
 722				pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
 723					usdt_provider, usdt_name, path, usdt_rel_ip);
 724				goto err_out;
 725			}
 726
 727			usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
 728		}
 729
 730		pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
 731			 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
 732			 note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
 733			 seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
 734
 735		/* Adjust semaphore address to be a file offset */
 736		if (note.sema_addr) {
 737			if (!man->has_sema_refcnt) {
 738				pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
 739					usdt_provider, usdt_name, path);
 740				err = -ENOTSUP;
 741				goto err_out;
 742			}
 743
 744			seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
 745			if (!seg) {
 746				err = -ESRCH;
 747				pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
 748					usdt_provider, usdt_name, path, note.sema_addr);
 749				goto err_out;
 750			}
 751			if (seg->is_exec) {
 752				err = -ESRCH;
 753				pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
 754					path, seg->start, seg->end, usdt_provider, usdt_name,
 755					note.sema_addr);
 756				goto err_out;
 757			}
 758
 759			usdt_sema_off = note.sema_addr - seg->start + seg->offset;
 760
 761			pr_debug("usdt: sema  for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
 762				 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
 763				 path, note.sema_addr, note.base_addr, usdt_sema_off,
 764				 seg->start, seg->end, seg->offset);
 765		}
 766
 767		/* Record adjusted addresses and offsets and parse USDT spec */
 768		tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets));
 769		if (!tmp) {
 770			err = -ENOMEM;
 771			goto err_out;
 772		}
 773		targets = tmp;
 774
 775		target = &targets[target_cnt];
 776		memset(target, 0, sizeof(*target));
 777
 778		target->abs_ip = usdt_abs_ip;
 779		target->rel_ip = usdt_rel_ip;
 780		target->sema_off = usdt_sema_off;
 781
  782		/* note.args references strings from the ELF itself, so they can
  783		 * be referenced safely until the elf_end() call
 784		 */
 785		target->spec_str = note.args;
 786
 787		err = parse_usdt_spec(&target->spec, &note, usdt_cookie);
 788		if (err)
 789			goto err_out;
 790
 791		target_cnt++;
 792	}
 793
 794	*out_targets = targets;
 795	*out_target_cnt = target_cnt;
 796	err = target_cnt;
 797
 798err_out:
 799	free(segs);
 800	free(vma_segs);
 801	if (err < 0)
 802		free(targets);
 803	return err;
 804}
 805
 806struct bpf_link_usdt {
 807	struct bpf_link link;
 808
 809	struct usdt_manager *usdt_man;
 810
 811	size_t spec_cnt;
 812	int *spec_ids;
 813
 814	size_t uprobe_cnt;
 815	struct {
 816		long abs_ip;
 817		struct bpf_link *link;
 818	} *uprobes;
 819
 820	struct bpf_link *multi_link;
 821};
 822
 823static int bpf_link_usdt_detach(struct bpf_link *link)
 824{
 825	struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
 826	struct usdt_manager *man = usdt_link->usdt_man;
 827	int i;
 828
 829	bpf_link__destroy(usdt_link->multi_link);
 830
  831	/* When multi_link is used, uprobe_cnt is 0 */
 832	for (i = 0; i < usdt_link->uprobe_cnt; i++) {
 833		/* detach underlying uprobe link */
 834		bpf_link__destroy(usdt_link->uprobes[i].link);
 835		/* there is no need to update specs map because it will be
 836		 * unconditionally overwritten on subsequent USDT attaches,
 837		 * but if BPF cookies are not used we need to remove entry
 838		 * from ip_to_spec_id map, otherwise we'll run into false
 839		 * conflicting IP errors
 840		 */
 841		if (!man->has_bpf_cookie) {
 842			/* not much we can do about errors here */
 843			(void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map),
 844						  &usdt_link->uprobes[i].abs_ip);
 845		}
 846	}
 847
 848	/* try to return the list of previously used spec IDs to usdt_manager
 849	 * for future reuse for subsequent USDT attaches
 850	 */
 851	if (!man->free_spec_ids) {
 852		/* if there were no free spec IDs yet, just transfer our IDs */
 853		man->free_spec_ids = usdt_link->spec_ids;
 854		man->free_spec_cnt = usdt_link->spec_cnt;
 855		usdt_link->spec_ids = NULL;
 856	} else {
 857		/* otherwise concat IDs */
 858		size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt;
 859		int *new_free_ids;
 860
 861		new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt,
 862						   sizeof(*new_free_ids));
 863		/* If we couldn't resize free_spec_ids, we'll just leak
 864		 * a bunch of free IDs; this is very unlikely to happen and if
  865		 * the system is that low on memory, it's the least of the user's
  866		 * concerns, probably.
 867		 * So just do our best here to return those IDs to usdt_manager.
 868		 * Another edge case when we can legitimately get NULL is when
 869		 * new_cnt is zero, which can happen in some edge cases, so we
 870		 * need to be careful about that.
 871		 */
 872		if (new_free_ids || new_cnt == 0) {
 873			memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
 874			       usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
 875			man->free_spec_ids = new_free_ids;
 876			man->free_spec_cnt = new_cnt;
 877		}
 878	}
 879
 880	return 0;
 881}
 882
 883static void bpf_link_usdt_dealloc(struct bpf_link *link)
 884{
 885	struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
 886
 887	free(usdt_link->spec_ids);
 888	free(usdt_link->uprobes);
 889	free(usdt_link);
 890}
 891
 892static size_t specs_hash_fn(long key, void *ctx)
 893{
 894	return str_hash((char *)key);
 895}
 896
 897static bool specs_equal_fn(long key1, long key2, void *ctx)
 898{
 899	return strcmp((char *)key1, (char *)key2) == 0;
 900}
 901
 902static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
 903			    struct bpf_link_usdt *link, struct usdt_target *target,
 904			    int *spec_id, bool *is_new)
 905{
 906	long tmp;
 907	void *new_ids;
 908	int err;
 909
 910	/* check if we already allocated spec ID for this spec string */
 911	if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
 912		*spec_id = tmp;
 913		*is_new = false;
 914		return 0;
 915	}
 916
 917	/* otherwise it's a new ID that needs to be set up in specs map and
 918	 * returned back to usdt_manager when USDT link is detached
 919	 */
 920	new_ids = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
 921	if (!new_ids)
 922		return -ENOMEM;
 923	link->spec_ids = new_ids;
 924
 925	/* get next free spec ID, giving preference to free list, if not empty */
 926	if (man->free_spec_cnt) {
 927		*spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
 928
 929		/* cache spec ID for current spec string for future lookups */
 930		err = hashmap__add(specs_hash, target->spec_str, *spec_id);
 931		if (err)
  932			return err;
 933
 934		man->free_spec_cnt--;
 935	} else {
 936		/* don't allocate spec ID bigger than what fits in specs map */
 937		if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
 938			return -E2BIG;
 939
 940		*spec_id = man->next_free_spec_id;
 941
 942		/* cache spec ID for current spec string for future lookups */
 943		err = hashmap__add(specs_hash, target->spec_str, *spec_id);
 944		if (err)
  945			return err;
 946
 947		man->next_free_spec_id++;
 948	}
 949
 950	/* remember new spec ID in the link for later return back to free list on detach */
 951	link->spec_ids[link->spec_cnt] = *spec_id;
 952	link->spec_cnt++;
 953	*is_new = true;
 954	return 0;
 955}
 956
 957struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
 958					  pid_t pid, const char *path,
 959					  const char *usdt_provider, const char *usdt_name,
 960					  __u64 usdt_cookie)
 961{
 962	unsigned long *offsets = NULL, *ref_ctr_offsets = NULL;
 963	int i, err, spec_map_fd, ip_map_fd;
 964	LIBBPF_OPTS(bpf_uprobe_opts, opts);
 965	struct hashmap *specs_hash = NULL;
 966	struct bpf_link_usdt *link = NULL;
 967	struct usdt_target *targets = NULL;
 968	__u64 *cookies = NULL;
 969	struct elf_fd elf_fd;
 970	size_t target_cnt;
 971
 972	spec_map_fd = bpf_map__fd(man->specs_map);
 973	ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
 974
 975	err = elf_open(path, &elf_fd);
 976	if (err)
 977		return libbpf_err_ptr(err);
 978
 979	err = sanity_check_usdt_elf(elf_fd.elf, path);
 980	if (err)
 981		goto err_out;
 982
 983	/* normalize PID filter */
 984	if (pid < 0)
 985		pid = -1;
 986	else if (pid == 0)
 987		pid = getpid();
 988
 989	/* discover USDT in given binary, optionally limiting
 990	 * activations to a given PID, if pid > 0
 991	 */
 992	err = collect_usdt_targets(man, elf_fd.elf, path, pid, usdt_provider, usdt_name,
 993				   usdt_cookie, &targets, &target_cnt);
 994	if (err <= 0) {
 995		err = (err == 0) ? -ENOENT : err;
 996		goto err_out;
 997	}
 998
 999	specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
1000	if (IS_ERR(specs_hash)) {
1001		err = PTR_ERR(specs_hash);
1002		goto err_out;
1003	}
1004
1005	link = calloc(1, sizeof(*link));
1006	if (!link) {
1007		err = -ENOMEM;
1008		goto err_out;
1009	}
1010
1011	link->usdt_man = man;
1012	link->link.detach = &bpf_link_usdt_detach;
1013	link->link.dealloc = &bpf_link_usdt_dealloc;
1014
1015	if (man->has_uprobe_multi) {
1016		offsets = calloc(target_cnt, sizeof(*offsets));
1017		cookies = calloc(target_cnt, sizeof(*cookies));
1018		ref_ctr_offsets = calloc(target_cnt, sizeof(*ref_ctr_offsets));
1019
1020		if (!offsets || !ref_ctr_offsets || !cookies) {
1021			err = -ENOMEM;
1022			goto err_out;
1023		}
1024	} else {
1025		link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
1026		if (!link->uprobes) {
1027			err = -ENOMEM;
1028			goto err_out;
1029		}
1030	}
1031
1032	for (i = 0; i < target_cnt; i++) {
1033		struct usdt_target *target = &targets[i];
1034		struct bpf_link *uprobe_link;
1035		bool is_new;
1036		int spec_id;
1037
1038		/* Spec ID can be either reused or newly allocated. If it is
 1039		 * newly allocated, we'll need to fill out the spec map; otherwise
 1040		 * the entire spec should already be valid and can just be used by
 1041		 * a new uprobe. We reuse a spec when the USDT arg spec is identical. We
1042		 * also never share specs between two different USDT
1043		 * attachments ("links"), so all the reused specs already
1044		 * share USDT cookie value implicitly.
1045		 */
1046		err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
1047		if (err)
1048			goto err_out;
1049
1050		if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
1051			err = -errno;
1052			pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %s\n",
1053				spec_id, usdt_provider, usdt_name, path, errstr(err));
1054			goto err_out;
1055		}
1056		if (!man->has_bpf_cookie &&
1057		    bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
1058			err = -errno;
1059			if (err == -EEXIST) {
1060				pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
1061				        spec_id, usdt_provider, usdt_name, path);
1062			} else {
1063				pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %s\n",
1064					target->abs_ip, spec_id, usdt_provider, usdt_name,
1065					path, errstr(err));
1066			}
1067			goto err_out;
1068		}
1069
1070		if (man->has_uprobe_multi) {
1071			offsets[i] = target->rel_ip;
1072			ref_ctr_offsets[i] = target->sema_off;
1073			cookies[i] = spec_id;
1074		} else {
1075			opts.ref_ctr_offset = target->sema_off;
1076			opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
1077			uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
1078								      target->rel_ip, &opts);
1079			err = libbpf_get_error(uprobe_link);
1080			if (err) {
1081				pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %s\n",
1082					i, usdt_provider, usdt_name, path, errstr(err));
1083				goto err_out;
1084			}
1085
1086			link->uprobes[i].link = uprobe_link;
1087			link->uprobes[i].abs_ip = target->abs_ip;
1088			link->uprobe_cnt++;
1089		}
1090	}
1091
1092	if (man->has_uprobe_multi) {
1093		LIBBPF_OPTS(bpf_uprobe_multi_opts, opts_multi,
1094			.ref_ctr_offsets = ref_ctr_offsets,
1095			.offsets = offsets,
1096			.cookies = cookies,
1097			.cnt = target_cnt,
1098		);
1099
1100		link->multi_link = bpf_program__attach_uprobe_multi(prog, pid, path,
1101								    NULL, &opts_multi);
1102		if (!link->multi_link) {
1103			err = -errno;
1104			pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %s\n",
1105				usdt_provider, usdt_name, path, errstr(err));
1106			goto err_out;
1107		}
1108
1109		free(offsets);
1110		free(ref_ctr_offsets);
1111		free(cookies);
1112	}
1113
1114	free(targets);
1115	hashmap__free(specs_hash);
1116	elf_close(&elf_fd);
1117	return &link->link;
1118
1119err_out:
1120	free(offsets);
1121	free(ref_ctr_offsets);
1122	free(cookies);
1123
1124	if (link)
1125		bpf_link__destroy(&link->link);
1126	free(targets);
1127	hashmap__free(specs_hash);
1128	elf_close(&elf_fd);
1129	return libbpf_err_ptr(err);
1130}
1131
1132/* Parse out USDT ELF note from '.note.stapsdt' section.
1133 * Logic inspired by perf's code.
1134 */
1135static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
1136			   const char *data, size_t name_off, size_t desc_off,
1137			   struct usdt_note *note)
1138{
1139	const char *provider, *name, *args;
1140	long addrs[3];
1141	size_t len;
1142
1143	/* sanity check USDT note name and type first */
1144	if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
1145		return -EINVAL;
1146	if (nhdr->n_type != USDT_NOTE_TYPE)
1147		return -EINVAL;
1148
1149	/* sanity check USDT note contents ("description" in ELF terminology) */
1150	len = nhdr->n_descsz;
1151	data = data + desc_off;
1152
1153	/* +3 is the very minimum required to store three empty strings */
1154	if (len < sizeof(addrs) + 3)
1155		return -EINVAL;
1156
1157	/* get location, base, and semaphore addrs */
1158	memcpy(&addrs, data, sizeof(addrs));
1159
1160	/* parse string fields: provider, name, args */
1161	provider = data + sizeof(addrs);
1162
1163	name = (const char *)memchr(provider, '\0', data + len - provider);
1164	if (!name) /* non-zero-terminated provider */
1165		return -EINVAL;
1166	name++;
1167	if (name >= data + len || *name == '\0') /* missing or empty name */
1168		return -EINVAL;
1169
1170	args = memchr(name, '\0', data + len - name);
1171	if (!args) /* non-zero-terminated name */
1172		return -EINVAL;
1173	++args;
1174	if (args >= data + len) /* missing arguments spec */
1175		return -EINVAL;
1176
1177	note->provider = provider;
1178	note->name = name;
1179	if (*args == '\0' || *args == ':')
1180		note->args = "";
1181	else
1182		note->args = args;
1183	note->loc_addr = addrs[0];
1184	note->base_addr = addrs[1];
1185	note->sema_addr = addrs[2];
1186
1187	return 0;
1188}
1189
1190static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz);
1191
1192static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
1193{
1194	struct usdt_arg_spec *arg;
1195	const char *s;
1196	int arg_sz, len;
1197
1198	spec->usdt_cookie = usdt_cookie;
1199	spec->arg_cnt = 0;
1200
1201	s = note->args;
1202	while (s[0]) {
1203		if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
1204			pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
1205				USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
1206			return -E2BIG;
1207		}
1208
1209		arg = &spec->args[spec->arg_cnt];
1210		len = parse_usdt_arg(s, spec->arg_cnt, arg, &arg_sz);
1211		if (len < 0)
1212			return len;
1213
1214		arg->arg_signed = arg_sz < 0;
1215		if (arg_sz < 0)
1216			arg_sz = -arg_sz;
1217
1218		switch (arg_sz) {
1219		case 1: case 2: case 4: case 8:
1220			arg->arg_bitshift = 64 - arg_sz * 8;
1221			break;
1222		default:
1223			pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
1224				spec->arg_cnt, s, arg_sz);
1225			return -EINVAL;
1226		}
1227
1228		s += len;
1229		spec->arg_cnt++;
1230	}
1231
1232	return 0;
1233}
1234
1235/* Architecture-specific logic for parsing USDT argument location specs */
1236
1237#if defined(__x86_64__) || defined(__i386__)
1238
1239static int calc_pt_regs_off(const char *reg_name)
1240{
1241	static struct {
1242		const char *names[4];
1243		size_t pt_regs_off;
1244	} reg_map[] = {
1245#ifdef __x86_64__
1246#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64)
1247#else
1248#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32)
1249#endif
1250		{ {"rip", "eip", "", ""}, reg_off(rip, eip) },
1251		{ {"rax", "eax", "ax", "al"}, reg_off(rax, eax) },
1252		{ {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) },
1253		{ {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) },
1254		{ {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) },
1255		{ {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) },
1256		{ {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) },
1257		{ {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) },
1258		{ {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) },
1259#undef reg_off
1260#ifdef __x86_64__
1261		{ {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) },
1262		{ {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) },
1263		{ {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) },
1264		{ {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) },
1265		{ {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) },
1266		{ {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) },
1267		{ {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) },
1268		{ {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) },
1269#endif
1270	};
1271	int i, j;
1272
1273	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1274		for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) {
1275			if (strcmp(reg_name, reg_map[i].names[j]) == 0)
1276				return reg_map[i].pt_regs_off;
1277		}
1278	}
1279
1280	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1281	return -ENOENT;
1282}
1283
1284static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1285{
1286	char reg_name[16];
1287	int len, reg_off;
1288	long off;
1289
1290	if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", arg_sz, &off, reg_name, &len) == 3) {
1291		/* Memory dereference case, e.g., -4@-20(%rbp) */
1292		arg->arg_type = USDT_ARG_REG_DEREF;
1293		arg->val_off = off;
1294		reg_off = calc_pt_regs_off(reg_name);
1295		if (reg_off < 0)
1296			return reg_off;
1297		arg->reg_off = reg_off;
1298	} else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", arg_sz, reg_name, &len) == 2) {
1299		/* Memory dereference case without offset, e.g., 8@(%rsp) */
1300		arg->arg_type = USDT_ARG_REG_DEREF;
1301		arg->val_off = 0;
1302		reg_off = calc_pt_regs_off(reg_name);
1303		if (reg_off < 0)
1304			return reg_off;
1305		arg->reg_off = reg_off;
1306	} else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) {
1307		/* Register read case, e.g., -4@%eax */
1308		arg->arg_type = USDT_ARG_REG;
1309		arg->val_off = 0;
1310
1311		reg_off = calc_pt_regs_off(reg_name);
1312		if (reg_off < 0)
1313			return reg_off;
1314		arg->reg_off = reg_off;
1315	} else if (sscanf(arg_str, " %d @ $%ld %n", arg_sz, &off, &len) == 2) {
1316		/* Constant value case, e.g., 4@$71 */
1317		arg->arg_type = USDT_ARG_CONST;
1318		arg->val_off = off;
1319		arg->reg_off = 0;
1320	} else {
1321		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1322		return -EINVAL;
1323	}
1324
1325	return len;
1326}
1327
1328#elif defined(__s390x__)
1329
1330/* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
1331
1332static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1333{
1334	unsigned int reg;
1335	int len;
1336	long off;
1337
1338	if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", arg_sz, &off, &reg, &len) == 3) {
1339		/* Memory dereference case, e.g., -2@-28(%r15) */
1340		arg->arg_type = USDT_ARG_REG_DEREF;
1341		arg->val_off = off;
1342		if (reg > 15) {
1343			pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
1344			return -EINVAL;
1345		}
1346		arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
1347	} else if (sscanf(arg_str, " %d @ %%r%u %n", arg_sz, &reg, &len) == 2) {
1348		/* Register read case, e.g., -8@%r0 */
1349		arg->arg_type = USDT_ARG_REG;
1350		arg->val_off = 0;
1351		if (reg > 15) {
1352			pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
1353			return -EINVAL;
1354		}
1355		arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
1356	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
1357		/* Constant value case, e.g., 4@71 */
1358		arg->arg_type = USDT_ARG_CONST;
1359		arg->val_off = off;
1360		arg->reg_off = 0;
1361	} else {
1362		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1363		return -EINVAL;
1364	}
1365
1366	return len;
1367}
1368
1369#elif defined(__aarch64__)
1370
1371static int calc_pt_regs_off(const char *reg_name)
1372{
1373	int reg_num;
1374
1375	if (sscanf(reg_name, "x%d", &reg_num) == 1) {
1376		if (reg_num >= 0 && reg_num < 31)
1377			return offsetof(struct user_pt_regs, regs[reg_num]);
1378	} else if (strcmp(reg_name, "sp") == 0) {
1379		return offsetof(struct user_pt_regs, sp);
1380	}
1381	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1382	return -ENOENT;
1383}
1384
1385static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1386{
1387	char reg_name[16];
1388	int len, reg_off;
1389	long off;
1390
1391	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , %ld ] %n", arg_sz, reg_name, &off, &len) == 3) {
1392		/* Memory dereference case, e.g., -4@[sp, 96] */
1393		arg->arg_type = USDT_ARG_REG_DEREF;
1394		arg->val_off = off;
1395		reg_off = calc_pt_regs_off(reg_name);
1396		if (reg_off < 0)
1397			return reg_off;
1398		arg->reg_off = reg_off;
1399	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
1400		/* Memory dereference case, e.g., -4@[sp] */
1401		arg->arg_type = USDT_ARG_REG_DEREF;
1402		arg->val_off = 0;
1403		reg_off = calc_pt_regs_off(reg_name);
1404		if (reg_off < 0)
1405			return reg_off;
1406		arg->reg_off = reg_off;
1407	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
1408		/* Constant value case, e.g., 4@5 */
1409		arg->arg_type = USDT_ARG_CONST;
1410		arg->val_off = off;
1411		arg->reg_off = 0;
1412	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
1413		/* Register read case, e.g., -8@x4 */
1414		arg->arg_type = USDT_ARG_REG;
1415		arg->val_off = 0;
1416		reg_off = calc_pt_regs_off(reg_name);
1417		if (reg_off < 0)
1418			return reg_off;
1419		arg->reg_off = reg_off;
1420	} else {
1421		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1422		return -EINVAL;
1423	}
1424
1425	return len;
1426}
1427
1428#elif defined(__riscv)
1429
1430static int calc_pt_regs_off(const char *reg_name)
1431{
1432	static struct {
1433		const char *name;
1434		size_t pt_regs_off;
1435	} reg_map[] = {
1436		{ "ra", offsetof(struct user_regs_struct, ra) },
1437		{ "sp", offsetof(struct user_regs_struct, sp) },
1438		{ "gp", offsetof(struct user_regs_struct, gp) },
1439		{ "tp", offsetof(struct user_regs_struct, tp) },
1440		{ "a0", offsetof(struct user_regs_struct, a0) },
1441		{ "a1", offsetof(struct user_regs_struct, a1) },
1442		{ "a2", offsetof(struct user_regs_struct, a2) },
1443		{ "a3", offsetof(struct user_regs_struct, a3) },
1444		{ "a4", offsetof(struct user_regs_struct, a4) },
1445		{ "a5", offsetof(struct user_regs_struct, a5) },
1446		{ "a6", offsetof(struct user_regs_struct, a6) },
1447		{ "a7", offsetof(struct user_regs_struct, a7) },
1448		{ "s0", offsetof(struct user_regs_struct, s0) },
1449		{ "s1", offsetof(struct user_regs_struct, s1) },
1450		{ "s2", offsetof(struct user_regs_struct, s2) },
1451		{ "s3", offsetof(struct user_regs_struct, s3) },
1452		{ "s4", offsetof(struct user_regs_struct, s4) },
1453		{ "s5", offsetof(struct user_regs_struct, s5) },
1454		{ "s6", offsetof(struct user_regs_struct, s6) },
1455		{ "s7", offsetof(struct user_regs_struct, s7) },
1456		{ "s8", offsetof(struct user_regs_struct, rv_s8) },
1457		{ "s9", offsetof(struct user_regs_struct, s9) },
1458		{ "s10", offsetof(struct user_regs_struct, s10) },
1459		{ "s11", offsetof(struct user_regs_struct, s11) },
1460		{ "t0", offsetof(struct user_regs_struct, t0) },
1461		{ "t1", offsetof(struct user_regs_struct, t1) },
1462		{ "t2", offsetof(struct user_regs_struct, t2) },
1463		{ "t3", offsetof(struct user_regs_struct, t3) },
1464		{ "t4", offsetof(struct user_regs_struct, t4) },
1465		{ "t5", offsetof(struct user_regs_struct, t5) },
1466		{ "t6", offsetof(struct user_regs_struct, t6) },
1467	};
1468	int i;
1469
1470	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1471		if (strcmp(reg_name, reg_map[i].name) == 0)
1472			return reg_map[i].pt_regs_off;
1473	}
1474
1475	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1476	return -ENOENT;
1477}
1478
1479static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1480{
1481	char reg_name[16];
1482	int len, reg_off;
1483	long off;
1484
1485	if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", arg_sz, &off, reg_name, &len) == 3) {
1486		/* Memory dereference case, e.g., -8@-88(s0) */
1487		arg->arg_type = USDT_ARG_REG_DEREF;
1488		arg->val_off = off;
1489		reg_off = calc_pt_regs_off(reg_name);
1490		if (reg_off < 0)
1491			return reg_off;
1492		arg->reg_off = reg_off;
1493	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
1494		/* Constant value case, e.g., 4@5 */
1495		arg->arg_type = USDT_ARG_CONST;
1496		arg->val_off = off;
1497		arg->reg_off = 0;
1498	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
1499		/* Register read case, e.g., -8@a1 */
1500		arg->arg_type = USDT_ARG_REG;
1501		arg->val_off = 0;
1502		reg_off = calc_pt_regs_off(reg_name);
1503		if (reg_off < 0)
1504			return reg_off;
1505		arg->reg_off = reg_off;
1506	} else {
1507		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1508		return -EINVAL;
1509	}
1510
1511	return len;
1512}
1513
1514#elif defined(__arm__)
1515
1516static int calc_pt_regs_off(const char *reg_name)
1517{
1518	static struct {
1519		const char *name;
1520		size_t pt_regs_off;
1521	} reg_map[] = {
1522		{ "r0", offsetof(struct pt_regs, uregs[0]) },
1523		{ "r1", offsetof(struct pt_regs, uregs[1]) },
1524		{ "r2", offsetof(struct pt_regs, uregs[2]) },
1525		{ "r3", offsetof(struct pt_regs, uregs[3]) },
1526		{ "r4", offsetof(struct pt_regs, uregs[4]) },
1527		{ "r5", offsetof(struct pt_regs, uregs[5]) },
1528		{ "r6", offsetof(struct pt_regs, uregs[6]) },
1529		{ "r7", offsetof(struct pt_regs, uregs[7]) },
1530		{ "r8", offsetof(struct pt_regs, uregs[8]) },
1531		{ "r9", offsetof(struct pt_regs, uregs[9]) },
1532		{ "r10", offsetof(struct pt_regs, uregs[10]) },
1533		{ "fp", offsetof(struct pt_regs, uregs[11]) },
1534		{ "ip", offsetof(struct pt_regs, uregs[12]) },
1535		{ "sp", offsetof(struct pt_regs, uregs[13]) },
1536		{ "lr", offsetof(struct pt_regs, uregs[14]) },
1537		{ "pc", offsetof(struct pt_regs, uregs[15]) },
1538	};
1539	int i;
1540
1541	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1542		if (strcmp(reg_name, reg_map[i].name) == 0)
1543			return reg_map[i].pt_regs_off;
1544	}
1545
1546	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1547	return -ENOENT;
1548}
1549
1550static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1551{
1552	char reg_name[16];
1553	int len, reg_off;
1554	long off;
1555
1556	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , #%ld ] %n",
1557		   arg_sz, reg_name, &off, &len) == 3) {
1558		/* Memory dereference case, e.g., -4@[fp, #96] */
1559		arg->arg_type = USDT_ARG_REG_DEREF;
1560		arg->val_off = off;
1561		reg_off = calc_pt_regs_off(reg_name);
1562		if (reg_off < 0)
1563			return reg_off;
1564		arg->reg_off = reg_off;
1565	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
1566		/* Memory dereference case, e.g., -4@[sp] */
1567		arg->arg_type = USDT_ARG_REG_DEREF;
1568		arg->val_off = 0;
1569		reg_off = calc_pt_regs_off(reg_name);
1570		if (reg_off < 0)
1571			return reg_off;
1572		arg->reg_off = reg_off;
1573	} else if (sscanf(arg_str, " %d @ #%ld %n", arg_sz, &off, &len) == 2) {
1574		/* Constant value case, e.g., 4@#5 */
1575		arg->arg_type = USDT_ARG_CONST;
1576		arg->val_off = off;
1577		arg->reg_off = 0;
1578	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
1579		/* Register read case, e.g., -8@r4 */
1580		arg->arg_type = USDT_ARG_REG;
1581		arg->val_off = 0;
1582		reg_off = calc_pt_regs_off(reg_name);
1583		if (reg_off < 0)
1584			return reg_off;
1585		arg->reg_off = reg_off;
1586	} else {
1587		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1588		return -EINVAL;
1589	}
1590
1591	return len;
1592}
1593
1594#else
1595
1596static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1597{
1598	pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
1599	return -ENOTSUP;
1600}
1601
1602#endif