/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Internal libbpf helpers.
 *
 * Copyright (c) 2019 Facebook
 */

#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H

#include <stdlib.h>
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
#include "libbpf_legacy.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/* prevent accidental re-addition of reallocarray() */
#pragma GCC poison reallocarray

#include "libbpf.h"
#include "btf.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef R_BPF_64_64
#define R_BPF_64_64 1
#endif
#ifndef R_BPF_64_ABS64
#define R_BPF_64_ABS64 2
#endif
#ifndef R_BPF_64_ABS32
#define R_BPF_64_ABS32 3
#endif
#ifndef R_BPF_64_32
#define R_BPF_64_32 10
#endif

#ifndef SHT_LLVM_ADDRSIG
#define SHT_LLVM_ADDRSIG 0x6FFF4C03
#endif

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

/* Older libelf versions all end up in this expression, for both 32 and 64 bit */
#ifndef GELF_ST_VISIBILITY
#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
#endif

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
	BTF_INT_ENC(encoding, bits_offset, bits)
#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#define BTF_TYPE_FLOAT_ENC(name, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)

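/*
 * Illustrative sketch (not from the original header): these macros expand to
 * comma-separated __u32 words, so they are typically used to build a raw BTF
 * type section as an array initializer, paired with a matching string
 * section, e.g. for feature probing via libbpf__load_raw_btf() declared
 * below. Names and string offsets here are assumptions for the example only.
 *
 *	const char strs[] = "\0int";
 *	__u32 types[] = {
 *		// [1] signed 32-bit int named "int" (string offset 1)
 *		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
 *		// [2] PTR -> [1]
 *		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1),
 *	};
 *
 *	libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
 */
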
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef max
# define max(x, y) ((x) < (y) ? (y) : (x))
#endif
#ifndef offsetofend
# define offsetofend(TYPE, FIELD) \
	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif

/* Symbol versioning is different between static and shared libraries.
 * Properly versioned symbols are needed for a shared library, but
 * only the symbol of the new version is needed for a static library.
 */
#ifdef SHARED
# define COMPAT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@" #version);
# define DEFAULT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@@" #version);
#else
# define COMPAT_VERSION(internal_name, api_name, version)
# define DEFAULT_VERSION(internal_name, api_name, version) \
	extern typeof(internal_name) api_name \
	__attribute__((alias(#internal_name)));
#endif

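/*
 * Illustrative sketch (names are assumptions): an API symbol is versioned by
 * giving each implementation an internal name and binding it to the public
 * name at a specific version:
 *
 *	COMPAT_VERSION(do_thing_v0_0_2, do_thing, LIBBPF_0.0.2)
 *	int do_thing_v0_0_2(...) { ... }
 *
 *	DEFAULT_VERSION(do_thing_v0_0_4, do_thing, LIBBPF_0.0.4)
 *	int do_thing_v0_0_4(...) { ... }
 *
 * For the shared library this emits .symver directives ("@" marks a compat
 * version, "@@" the default one); for the static library only the default
 * version is exposed, as an alias of its internal name.
 */
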
extern void libbpf_print(enum libbpf_print_level level,
			 const char *format, ...)
	__attribute__((format(printf, 2, 3)));

#define __pr(level, fmt, ...)	\
do {				\
	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__);	\
} while (0)

#define pr_warn(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)

#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
/*
 * Re-implement glibc's reallocarray() for libbpf internal-only use.
 * reallocarray(), unfortunately, is not available in all versions of glibc,
 * so it requires extra feature detection and the reallocarray() stub from
 * <tools/libc_compat.h> and COMPAT_NEED_REALLOCARRAY. All this complicates
 * the libbpf build unnecessarily and is just a maintenance burden. Instead,
 * it's trivial to implement a libbpf-specific internal version and use it
 * throughout libbpf.
 */
static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
{
	size_t total;

#if __has_builtin(__builtin_mul_overflow)
	if (unlikely(__builtin_mul_overflow(nmemb, size, &total)))
		return NULL;
#else
	if (size == 0 || nmemb > ULONG_MAX / size)
		return NULL;
	total = nmemb * size;
#endif
	return realloc(ptr, total);
}

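/*
 * Illustrative sketch (field names are assumptions): typical use is growing
 * an internal array without risking nmemb * size overflow:
 *
 *	struct bpf_map *maps;
 *
 *	maps = libbpf_reallocarray(obj->maps, obj->nr_maps + 1, sizeof(*maps));
 *	if (!maps)
 *		return -ENOMEM;
 *	obj->maps = maps;
 */
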
struct btf;
struct btf_type;

struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id);
const char *btf_kind_str(const struct btf_type *t);
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
{
	return (enum btf_func_linkage)(int)btf_vlen(t);
}

static inline __u32 btf_type_info(int kind, int vlen, int kflag)
{
	return (kflag << 31) | (kind << 24) | vlen;
}

enum map_def_parts {
	MAP_DEF_MAP_TYPE	= 0x001,
	MAP_DEF_KEY_TYPE	= 0x002,
	MAP_DEF_KEY_SIZE	= 0x004,
	MAP_DEF_VALUE_TYPE	= 0x008,
	MAP_DEF_VALUE_SIZE	= 0x010,
	MAP_DEF_MAX_ENTRIES	= 0x020,
	MAP_DEF_MAP_FLAGS	= 0x040,
	MAP_DEF_NUMA_NODE	= 0x080,
	MAP_DEF_PINNING		= 0x100,
	MAP_DEF_INNER_MAP	= 0x200,

	MAP_DEF_ALL		= 0x3ff, /* combination of all above */
};

struct btf_map_def {
	enum map_def_parts parts;
	__u32 map_type;
	__u32 key_type_id;
	__u32 key_size;
	__u32 value_type_id;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
	__u32 numa_node;
	__u32 pinning;
};

int parse_btf_map_def(const char *map_name, struct btf *btf,
		      const struct btf_type *def_t, bool strict,
		      struct btf_map_def *map_def, struct btf_map_def *inner_def);

void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt);
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);

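/*
 * Illustrative sketch (assumed semantics): libbpf_ensure_mem() reallocates
 * the array at *data so it can hold at least need_cnt elements of elem_sz
 * bytes each, updating the capacity in *cap_cnt; it returns 0 on success and
 * a negative error otherwise:
 *
 *	__u32 *ids = NULL;
 *	size_t cap = 0, cnt = 0;
 *
 *	if (libbpf_ensure_mem((void **)&ids, &cap, sizeof(*ids), cnt + 1))
 *		return -ENOMEM;
 *	ids[cnt++] = 42;
 */
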
static inline bool libbpf_validate_opts(const char *opts,
					size_t opts_sz, size_t user_sz,
					const char *type_name)
{
	if (user_sz < sizeof(size_t)) {
		pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
		return false;
	}
	if (user_sz > opts_sz) {
		size_t i;

		for (i = opts_sz; i < user_sz; i++) {
			if (opts[i]) {
				pr_warn("%s has non-zero extra bytes\n",
					type_name);
				return false;
			}
		}
	}
	return true;
}

#define OPTS_VALID(opts, type)						\
	(!(opts) || libbpf_validate_opts((const char *)opts,		\
					 offsetofend(struct type,	\
						     type##__last_field), \
					 (opts)->sz, #type))
#define OPTS_HAS(opts, field) \
	((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
#define OPTS_GET(opts, field, fallback_value) \
	(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
#define OPTS_SET(opts, field, value)	\
	do {				\
		if (OPTS_HAS(opts, field))	\
			(opts)->field = value;	\
	} while (0)

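/*
 * Illustrative sketch (hypothetical sample_opts type): an opts-taking API
 * first validates the caller-provided struct, then reads fields that may be
 * missing from an older caller's definition:
 *
 *	int do_something(const char *name, const struct sample_opts *opts)
 *	{
 *		__u32 flags;
 *
 *		if (!OPTS_VALID(opts, sample_opts))
 *			return libbpf_err(-EINVAL);
 *		flags = OPTS_GET(opts, flags, 0); // 0 if caller's struct lacks it
 *		...
 *	}
 *
 * OPTS_VALID() accepts a NULL opts pointer, rejects a too-small ->sz, and
 * rejects non-zero bytes past the fields this version of libbpf knows about.
 */
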
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len);

struct bpf_prog_load_params {
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	const char *name;
	const struct bpf_insn *insns;
	size_t insn_cnt;
	const char *license;
	__u32 kern_version;
	__u32 attach_prog_fd;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 prog_ifindex;
	__u32 prog_btf_fd;
	__u32 prog_flags;

	__u32 func_info_rec_size;
	const void *func_info;
	__u32 func_info_cnt;

	__u32 line_info_rec_size;
	const void *line_info;
	__u32 line_info_cnt;

	__u32 log_level;
	char *log_buf;
	size_t log_buf_sz;
};

int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);

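/*
 * Illustrative sketch (values are placeholders): the object loader fills this
 * struct from the parsed program and calls libbpf__bpf_prog_load(), which
 * issues BPF_PROG_LOAD and returns a program FD or a negative error:
 *
 *	struct bpf_prog_load_params load_attr = {};
 *	int prog_fd;
 *
 *	load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
 *	load_attr.name = "my_prog";
 *	load_attr.insns = insns;
 *	load_attr.insn_cnt = insn_cnt;
 *	load_attr.license = "GPL";
 *
 *	prog_fd = libbpf__bpf_prog_load(&load_attr);
 */
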
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
				const char **prefix, int *kind);

struct btf_ext_info {
	/*
	 * info points to the individual info section (e.g. func_info and
	 * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
	 */
	void *info;
	__u32 rec_size;
	__u32 len;
};

#define for_each_btf_ext_sec(seg, sec)					\
	for (sec = (seg)->info;						\
	     (void *)sec < (seg)->info + (seg)->len;			\
	     sec = (void *)sec + sizeof(struct btf_ext_info_sec) +	\
		   (seg)->rec_size * sec->num_info)

#define for_each_btf_ext_rec(seg, sec, i, rec)				\
	for (i = 0, rec = (void *)&(sec)->data;				\
	     i < (sec)->num_info;					\
	     i++, rec = (void *)rec + (seg)->rec_size)

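/*
 * Illustrative sketch: walking every func_info record of a parsed .BTF.ext
 * with the iteration macros above, using the minimal record and section
 * types declared below:
 *
 *	struct btf_ext_info *seg = &btf_ext->func_info;
 *	struct btf_ext_info_sec *sec;
 *	struct bpf_func_info_min *rec;
 *	int i;
 *
 *	for_each_btf_ext_sec(seg, sec) {
 *		for_each_btf_ext_rec(seg, sec, i, rec) {
 *			// rec->insn_off and rec->type_id are valid here
 *		}
 *	}
 */
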
/*
 * The .BTF.ext ELF section layout is defined as
 *   struct btf_ext_header
 *   func_info subsection
 *
 * The func_info subsection layout:
 *   record size for struct bpf_func_info in the func_info subsection
 *   struct btf_sec_func_info for section #1
 *   a list of bpf_func_info records for section #1
 *   where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
 *   but may not be identical
 *   struct btf_sec_func_info for section #2
 *   a list of bpf_func_info records for section #2
 *   ......
 *
 * Note that the bpf_func_info record size in .BTF.ext may not
 * be the same as the one defined in include/uapi/linux/bpf.h.
 * The loader should ensure that record_size meets the minimum
 * requirement and pass the record as is to the kernel. The
 * kernel will handle the func_info properly based on its contents.
 */
struct btf_ext_header {
	__u16	magic;
	__u8	version;
	__u8	flags;
	__u32	hdr_len;

	/* All offsets are in bytes relative to the end of this header */
	__u32	func_info_off;
	__u32	func_info_len;
	__u32	line_info_off;
	__u32	line_info_len;

	/* optional part of .BTF.ext header */
	__u32	core_relo_off;
	__u32	core_relo_len;
};

struct btf_ext {
	union {
		struct btf_ext_header *hdr;
		void *data;
	};
	struct btf_ext_info func_info;
	struct btf_ext_info line_info;
	struct btf_ext_info core_relo_info;
	__u32 data_size;
};

struct btf_ext_info_sec {
	__u32	sec_name_off;
	__u32	num_info;
	/* Followed by num_info * record_size number of bytes */
	__u8	data[];
};

/* The minimum bpf_func_info checked by the loader */
struct bpf_func_info_min {
	__u32	insn_off;
	__u32	type_id;
};

/* The minimum bpf_line_info checked by the loader */
struct bpf_line_info_min {
	__u32	insn_off;
	__u32	file_name_off;
	__u32	line_off;
	__u32	line_col;
};

/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
 * has to be adjusted by relocations.
 */
enum bpf_core_relo_kind {
	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
	BPF_FIELD_BYTE_SIZE = 1,	/* field size in bytes */
	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
	BPF_FIELD_SIGNED = 3,		/* field signedness (0 - unsigned, 1 - signed) */
	BPF_FIELD_LSHIFT_U64 = 4,	/* bitfield-specific left bitshift */
	BPF_FIELD_RSHIFT_U64 = 5,	/* bitfield-specific right bitshift */
	BPF_TYPE_ID_LOCAL = 6,		/* type ID in local BPF object */
	BPF_TYPE_ID_TARGET = 7,		/* type ID in target kernel */
	BPF_TYPE_EXISTS = 8,		/* type existence in target kernel */
	BPF_TYPE_SIZE = 9,		/* type size in bytes */
	BPF_ENUMVAL_EXISTS = 10,	/* enum value existence in target kernel */
	BPF_ENUMVAL_VALUE = 11,		/* enum value integer value */
};

/* The minimum bpf_core_relo checked by the loader
 *
 * A CO-RE relocation captures the following data:
 * - insn_off - instruction offset (in bytes) within a BPF program that needs
 *   its insn->imm field to be relocated with actual field info;
 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
 *   type or field;
 * - access_str_off - offset into the corresponding .BTF string section. String
 *   interpretation depends on the specific relocation kind:
 *   - for field-based relocations, the string encodes an accessed field using
 *     a sequence of field and array indices, separated by colon (:). It's
 *     conceptually very close to LLVM's getelementptr ([0]) instruction's
 *     arguments for identifying offset to a field.
 *   - for type-based relocations, the string is expected to be just "0";
 *   - for enum value-based relocations, the string contains an index of the
 *     enum value within its enum type;
 *
 * Example, to provide a better feel:
 *
 *   struct sample {
 *       int a;
 *       struct {
 *           int b[10];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *   int x = &s->a;     // encoded as "0:0" (a is field #0)
 *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
 *                      // b is field #0 inside anon struct, accessing elem #5)
 *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
 *
 * type_id for all relocs in this example will capture the BTF type id of
 * `struct sample`.
 *
 * Such a relocation is emitted when using the __builtin_preserve_access_index()
 * Clang built-in, passing an expression that captures a field's address, e.g.:
 *
 *   bpf_probe_read(&dst, sizeof(dst),
 *                  __builtin_preserve_access_index(&src->a.b.c));
 *
 * In this case Clang will emit a field relocation recording the data necessary
 * to find the offset of the embedded `a.b.c` field within the `src` struct.
 *
 *   [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
 */
struct bpf_core_relo {
	__u32 insn_off;
	__u32 type_id;
	__u32 access_str_off;
	enum bpf_core_relo_kind kind;
};

typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);

extern enum libbpf_strict_mode libbpf_mode;

/* handle direct returned errors */
static inline int libbpf_err(int ret)
{
	if (ret < 0)
		errno = -ret;
	return ret;
}

/* handle errno-based (e.g., syscall or libc) errors according to libbpf's
 * strict mode settings
 */
static inline int libbpf_err_errno(int ret)
{
	if (libbpf_mode & LIBBPF_STRICT_DIRECT_ERRS)
		/* errno is already assumed to be set on error */
		return ret < 0 ? -errno : ret;

	/* legacy: on error return -1 directly and don't touch errno */
	return ret;
}

/* handle errors for pointer-returning APIs, err is assumed to be < 0 always */
static inline void *libbpf_err_ptr(int err)
{
	/* set errno on error, this doesn't break anything */
	errno = -err;

	if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
		return NULL;

	/* legacy: encode err as ptr */
	return ERR_PTR(err);
}

/* handle results of pointer-returning APIs */
static inline void *libbpf_ptr(void *ret)
{
	/* set errno on error, this doesn't break anything */
	if (IS_ERR(ret))
		errno = -PTR_ERR(ret);

	if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
		return IS_ERR(ret) ? NULL : ret;

	/* legacy: pass-through original pointer */
	return ret;
}

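/*
 * Illustrative sketch (simplified): public entry points funnel their results
 * through the helpers above so that both the legacy and the strict-mode error
 * conventions are honored:
 *
 *	int some_fd_returning_api(...)
 *	{
 *		int fd = do_syscall(...);	// hypothetical, sets errno on error
 *
 *		return libbpf_err_errno(fd);
 *	}
 *
 *	struct btf *some_ptr_returning_api(...)
 *	{
 *		if (bad_input)
 *			return libbpf_err_ptr(-EINVAL);	// NULL or ERR_PTR(-EINVAL)
 *		...
 *	}
 */
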
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */