Linux v6.13.7: libbpf internal helpers header (libbpf_internal.h)
  1/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
  2
  3/*
  4 * Internal libbpf helpers.
  5 *
  6 * Copyright (c) 2019 Facebook
  7 */
  8
  9#ifndef __LIBBPF_LIBBPF_INTERNAL_H
 10#define __LIBBPF_LIBBPF_INTERNAL_H
 11
 12#include <stdlib.h>
 13#include <byteswap.h>
 14#include <limits.h>
 15#include <errno.h>
 16#include <linux/err.h>
 17#include <fcntl.h>
 18#include <unistd.h>
 19#include <sys/syscall.h>
 20#include <libelf.h>
 21#include "relo_core.h"
 22
 23/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
 24 * ([0]), and just returns -EINVAL even if file exists and is accessible.
 25 * See [1] for issues caused by this.
 26 *
 27 * So just redefine it to 0 on Android.
 28 *
 29 * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
 30 * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
 31 */
 32#ifdef __ANDROID__
 33#undef AT_EACCESS
 34#define AT_EACCESS 0
 35#endif
 36
 37/* make sure libbpf doesn't use kernel-only integer typedefs */
 38#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
 39
 40/* prevent accidental re-addition of reallocarray() */
 41#pragma GCC poison reallocarray
 42
 43#include "libbpf.h"
 44#include "btf.h"
 45
 46#ifndef EM_BPF
 47#define EM_BPF 247
 48#endif
 49
 50#ifndef R_BPF_64_64
 51#define R_BPF_64_64 1
 52#endif
 53#ifndef R_BPF_64_ABS64
 54#define R_BPF_64_ABS64 2
 55#endif
 56#ifndef R_BPF_64_ABS32
 57#define R_BPF_64_ABS32 3
 58#endif
 59#ifndef R_BPF_64_32
 60#define R_BPF_64_32 10
 61#endif
 62
 63#ifndef SHT_LLVM_ADDRSIG
 64#define SHT_LLVM_ADDRSIG 0x6FFF4C03
 65#endif
 66
 67/* if libelf is old and doesn't support mmap(), fall back to read() */
 68#ifndef ELF_C_READ_MMAP
 69#define ELF_C_READ_MMAP ELF_C_READ
 70#endif
 71
 72/* Older libelf all end up in this expression, for both 32 and 64 bit */
 73#ifndef ELF64_ST_VISIBILITY
 74#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
 75#endif
 76
 77#define BTF_INFO_ENC(kind, kind_flag, vlen) \
 78	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
 79#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
 80#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
 81	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
 82#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
 83	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
 84	BTF_INT_ENC(encoding, bits_offset, bits)
 85#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
 86#define BTF_PARAM_ENC(name, type) (name), (type)
 87#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
 88#define BTF_TYPE_FLOAT_ENC(name, sz) \
 89	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
 90#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \
 91	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)
 92#define BTF_TYPE_TYPE_TAG_ENC(value, type) \
 93	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type)
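
/* Illustrative sketch (not part of the original header): how the BTF_*_ENC()
 * helpers above can be combined to hand-craft a tiny raw BTF type section.
 * The name offsets (1, 7, 9) are made-up indexes into a string section.
 */
#if 0
static const __u32 example_raw_btf_types[] = {
	/* [1] signed int, 32 bits, 4 bytes */
	BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	/* [2] struct of size 4 with one member of type [1] at bit offset 0 */
	BTF_TYPE_ENC(7, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(9, 1, 0),
};
#endif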
 94
 95#ifndef likely
 96#define likely(x) __builtin_expect(!!(x), 1)
 97#endif
 98#ifndef unlikely
 99#define unlikely(x) __builtin_expect(!!(x), 0)
100#endif
101#ifndef min
102# define min(x, y) ((x) < (y) ? (x) : (y))
103#endif
104#ifndef max
105# define max(x, y) ((x) < (y) ? (y) : (x))
106#endif
107#ifndef offsetofend
108# define offsetofend(TYPE, FIELD) \
109	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
110#endif
111#ifndef __alias
112#define __alias(symbol) __attribute__((alias(#symbol)))
113#endif
114
115/* Check whether a string `str` has prefix `pfx`, regardless if `pfx` is
116 * a string literal known at compilation time or char * pointer known only at
117 * runtime.
118 */
119#define str_has_pfx(str, pfx) \
120	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
121
122/* suffix check */
123static inline bool str_has_sfx(const char *str, const char *sfx)
124{
125	size_t str_len = strlen(str);
126	size_t sfx_len = strlen(sfx);
127
128	if (sfx_len > str_len)
129		return false;
130	return strcmp(str + str_len - sfx_len, sfx) == 0;
131}
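
/* Illustrative usage sketch (not part of the original header): both helpers
 * accept either string literals or runtime char * values.
 */
#if 0
	bool a = str_has_pfx("bpf_prog_load", "bpf_");	/* true */
	bool b = str_has_sfx("libbpf.so.1", ".so.1");	/* true */
#endif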
132
133/* Symbol versioning is different between static and shared library.
134 * Properly versioned symbols are needed for shared library, but
135 * only the symbol of the new version is needed for static library.
136 * Starting with GNU C 10, use symver attribute instead of .symver assembler
137 * directive, which works better with GCC LTO builds.
138 */
139#if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10
140
141#define DEFAULT_VERSION(internal_name, api_name, version) \
142	__attribute__((symver(#api_name "@@" #version)))
143#define COMPAT_VERSION(internal_name, api_name, version) \
144	__attribute__((symver(#api_name "@" #version)))
145
146#elif defined(SHARED)
147
148#define COMPAT_VERSION(internal_name, api_name, version) \
149	asm(".symver " #internal_name "," #api_name "@" #version);
150#define DEFAULT_VERSION(internal_name, api_name, version) \
151	asm(".symver " #internal_name "," #api_name "@@" #version);
152
153#else /* !SHARED */
154
155#define COMPAT_VERSION(internal_name, api_name, version)
156#define DEFAULT_VERSION(internal_name, api_name, version) \
157	extern typeof(internal_name) api_name \
158	__attribute__((alias(#internal_name)));
159
160#endif
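
/* Illustrative sketch (not part of the original header), using hypothetical
 * symbol names: each macro invocation is placed directly before the
 * definition of the internal symbol it versions, exposing example_func_v1_0()
 * as the default example_func and keeping the old one reachable at an older
 * version node.
 */
#if 0
DEFAULT_VERSION(example_func_v1_0, example_func, LIBBPF_1.0.0)
int example_func_v1_0(int arg)
{
	return arg + 1;
}

COMPAT_VERSION(example_func_v0_6, example_func, LIBBPF_0.6.0)
int example_func_v0_6(int arg)
{
	return arg;
}
#endif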
161
162extern void libbpf_print(enum libbpf_print_level level,
163			 const char *format, ...)
164	__attribute__((format(printf, 2, 3)));
165
166#define __pr(level, fmt, ...)	\
167do {				\
168	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__);	\
169} while (0)
170
171#define pr_warn(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
172#define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
173#define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
174
175#ifndef __has_builtin
176#define __has_builtin(x) 0
177#endif
178
179struct bpf_link {
180	int (*detach)(struct bpf_link *link);
181	void (*dealloc)(struct bpf_link *link);
182	char *pin_path;		/* NULL, if not pinned */
183	int fd;			/* hook FD, -1 if not applicable */
184	bool disconnected;
185};
186
187/*
188 * Re-implement glibc's reallocarray() for libbpf internal-only use.
189 * reallocarray(), unfortunately, is not available in all versions of glibc,
190 * so requires extra feature detection and using reallocarray() stub from
191 * <tools/libc_compat.h> and COMPAT_NEED_REALLOCARRAY. All this complicates
192 * build of libbpf unnecessarily and is just a maintenance burden. Instead,
193 * it's trivial to implement libbpf-specific internal version and use it
194 * throughout libbpf.
195 */
196static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
197{
198	size_t total;
199
200#if __has_builtin(__builtin_mul_overflow)
201	if (unlikely(__builtin_mul_overflow(nmemb, size, &total)))
202		return NULL;
203#else
204	if (size == 0 || nmemb > ULONG_MAX / size)
205		return NULL;
206	total = nmemb * size;
207#endif
208	return realloc(ptr, total);
209}
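
/* Illustrative usage sketch (not part of the original header): grow an array
 * without risking nmemb * size multiplication overflow; new_cnt is assumed to
 * be computed by the caller.
 */
#if 0
	struct bpf_insn *insns, *tmp;

	tmp = libbpf_reallocarray(insns, new_cnt, sizeof(*insns));
	if (!tmp)
		return -ENOMEM;
	insns = tmp;
#endif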
210
211/* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst
212 * is zero-terminated string no matter what (unless sz == 0, in which case
213 * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs
214 * in what is returned. Given this is internal helper, it's trivial to extend
215 * this, when necessary. Use this instead of strncpy inside libbpf source code.
216 */
217static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz)
218{
219	size_t i;
220
221	if (sz == 0)
222		return;
223
224	sz--;
225	for (i = 0; i < sz && src[i]; i++)
226		dst[i] = src[i];
227	dst[i] = '\0';
228}
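
/* Illustrative usage sketch (not part of the original header): unlike
 * strncpy(), the destination is always zero-terminated and never padded;
 * user_supplied_name stands in for any source string.
 */
#if 0
	char name[BPF_OBJ_NAME_LEN];

	libbpf_strlcpy(name, user_supplied_name, sizeof(name));
#endif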
229
230__u32 get_kernel_version(void);
231
232struct btf;
233struct btf_type;
234
235struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
236const char *btf_kind_str(const struct btf_type *t);
237const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
238const struct btf_header *btf_header(const struct btf *btf);
239void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
240int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);
241
242static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
243{
244	return (enum btf_func_linkage)(int)btf_vlen(t);
245}
246
247static inline __u32 btf_type_info(int kind, int vlen, int kflag)
248{
249	return (kflag << 31) | (kind << 24) | vlen;
250}
251
252enum map_def_parts {
253	MAP_DEF_MAP_TYPE	= 0x001,
254	MAP_DEF_KEY_TYPE	= 0x002,
255	MAP_DEF_KEY_SIZE	= 0x004,
256	MAP_DEF_VALUE_TYPE	= 0x008,
257	MAP_DEF_VALUE_SIZE	= 0x010,
258	MAP_DEF_MAX_ENTRIES	= 0x020,
259	MAP_DEF_MAP_FLAGS	= 0x040,
260	MAP_DEF_NUMA_NODE	= 0x080,
261	MAP_DEF_PINNING		= 0x100,
262	MAP_DEF_INNER_MAP	= 0x200,
263	MAP_DEF_MAP_EXTRA	= 0x400,
264
265	MAP_DEF_ALL		= 0x7ff, /* combination of all above */
266};
267
268struct btf_map_def {
269	enum map_def_parts parts;
270	__u32 map_type;
271	__u32 key_type_id;
272	__u32 key_size;
273	__u32 value_type_id;
274	__u32 value_size;
275	__u32 max_entries;
276	__u32 map_flags;
277	__u32 numa_node;
278	__u32 pinning;
279	__u64 map_extra;
280};
281
282int parse_btf_map_def(const char *map_name, struct btf *btf,
283		      const struct btf_type *def_t, bool strict,
284		      struct btf_map_def *map_def, struct btf_map_def *inner_def);
285
286void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
287		     size_t cur_cnt, size_t max_cnt, size_t add_cnt);
288int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
289
290static inline bool libbpf_is_mem_zeroed(const char *p, ssize_t len)
291{
292	while (len > 0) {
293		if (*p)
294			return false;
295		p++;
296		len--;
297	}
298	return true;
299}
300
301static inline bool libbpf_validate_opts(const char *opts,
302					size_t opts_sz, size_t user_sz,
303					const char *type_name)
304{
305	if (user_sz < sizeof(size_t)) {
306		pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
307		return false;
308	}
309	if (!libbpf_is_mem_zeroed(opts + opts_sz, (ssize_t)user_sz - opts_sz)) {
310		pr_warn("%s has non-zero extra bytes\n", type_name);
311		return false;
312	}
313	return true;
314}
315
316#define OPTS_VALID(opts, type)						      \
317	(!(opts) || libbpf_validate_opts((const char *)opts,		      \
318					 offsetofend(struct type,	      \
319						     type##__last_field),     \
320					 (opts)->sz, #type))
321#define OPTS_HAS(opts, field) \
322	((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
323#define OPTS_GET(opts, field, fallback_value) \
324	(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
325#define OPTS_SET(opts, field, value)		\
326	do {					\
327		if (OPTS_HAS(opts, field))	\
328			(opts)->field = value;	\
329	} while (0)
330
331#define OPTS_ZEROED(opts, last_nonzero_field)				      \
332({									      \
333	ssize_t __off = offsetofend(typeof(*(opts)), last_nonzero_field);     \
334	!(opts) || libbpf_is_mem_zeroed((const void *)opts + __off,	      \
335					(opts)->sz - __off);		      \
336})
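
/* Illustrative sketch (not part of the original header), using a hypothetical
 * opts struct and API: 'sz' must be the first field and callers set it to the
 * sizeof() of the struct they were compiled against, which is what lets
 * OPTS_VALID() and OPTS_GET() tolerate older and newer callers;
 * do_example_op() is a placeholder.
 */
#if 0
struct example_opts {
	size_t sz;	/* size of this struct, for forward/backward compat */
	__u32 flags;
	size_t :0;	/* keep trailing padding from counting as extra bytes */
};
#define example_opts__last_field flags

static int example_api(int fd, const struct example_opts *opts)
{
	if (!OPTS_VALID(opts, example_opts))
		return -EINVAL;

	/* falls back to 0 if the caller's struct predates 'flags' */
	return do_example_op(fd, OPTS_GET(opts, flags, 0));
}
#endif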
337
338enum kern_feature_id {
339	/* v4.14: kernel support for program & map names. */
340	FEAT_PROG_NAME,
341	/* v5.2: kernel support for global data sections. */
342	FEAT_GLOBAL_DATA,
343	/* BTF support */
344	FEAT_BTF,
345	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
346	FEAT_BTF_FUNC,
347	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
348	FEAT_BTF_DATASEC,
349	/* BTF_FUNC_GLOBAL is supported */
350	FEAT_BTF_GLOBAL_FUNC,
351	/* BPF_F_MMAPABLE is supported for arrays */
352	FEAT_ARRAY_MMAP,
353	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
354	FEAT_EXP_ATTACH_TYPE,
355	/* bpf_probe_read_{kernel,user}[_str] helpers */
356	FEAT_PROBE_READ_KERN,
357	/* BPF_PROG_BIND_MAP is supported */
358	FEAT_PROG_BIND_MAP,
359	/* Kernel support for module BTFs */
360	FEAT_MODULE_BTF,
361	/* BTF_KIND_FLOAT support */
362	FEAT_BTF_FLOAT,
363	/* BPF perf link support */
364	FEAT_PERF_LINK,
365	/* BTF_KIND_DECL_TAG support */
366	FEAT_BTF_DECL_TAG,
367	/* BTF_KIND_TYPE_TAG support */
368	FEAT_BTF_TYPE_TAG,
369	/* memcg-based accounting for BPF maps and progs */
370	FEAT_MEMCG_ACCOUNT,
371	/* BPF cookie (bpf_get_attach_cookie() BPF helper) support */
372	FEAT_BPF_COOKIE,
373	/* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */
374	FEAT_BTF_ENUM64,
375	/* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
376	FEAT_SYSCALL_WRAPPER,
377	/* BPF multi-uprobe link support */
378	FEAT_UPROBE_MULTI_LINK,
379	/* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */
380	FEAT_ARG_CTX_TAG,
381	/* Kernel supports '?' at the front of datasec names */
382	FEAT_BTF_QMARK_DATASEC,
383	__FEAT_CNT,
384};
385
386enum kern_feature_result {
387	FEAT_UNKNOWN = 0,
388	FEAT_SUPPORTED = 1,
389	FEAT_MISSING = 2,
390};
391
392struct kern_feature_cache {
393	enum kern_feature_result res[__FEAT_CNT];
394	int token_fd;
395};
396
397bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
398bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
399
400int probe_kern_syscall_wrapper(int token_fd);
401int probe_memcg_account(int token_fd);
402int bump_rlimit_memlock(void);
403
404int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
405int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
406int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
407			 const char *str_sec, size_t str_len,
408			 int token_fd);
409int btf_load_into_kernel(struct btf *btf,
410			 char *log_buf, size_t log_sz, __u32 log_level,
411			 int token_fd);
412
413struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
414void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
415				const char **prefix, int *kind);
416
417struct btf_ext_info {
418	/*
419	 * info points to the individual info section (e.g. func_info and
420	 * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
421	 */
422	void *info;
423	__u32 rec_size;
424	__u32 len;
425	/* optional (maintained internally by libbpf) mapping between .BTF.ext
426	 * section and corresponding ELF section. This is used to join
427	 * information like CO-RE relocation records with corresponding BPF
428	 * programs defined in ELF sections
429	 */
430	__u32 *sec_idxs;
431	int sec_cnt;
432};
433
434#define for_each_btf_ext_sec(seg, sec)					\
435	for (sec = (seg)->info;						\
436	     (void *)sec < (seg)->info + (seg)->len;			\
437	     sec = (void *)sec + sizeof(struct btf_ext_info_sec) +	\
438		   (seg)->rec_size * sec->num_info)
439
440#define for_each_btf_ext_rec(seg, sec, i, rec)				\
441	for (i = 0, rec = (void *)&(sec)->data;				\
442	     i < (sec)->num_info;					\
443	     i++, rec = (void *)rec + (seg)->rec_size)
444
445/*
446 * The .BTF.ext ELF section layout defined as
447 *   struct btf_ext_header
448 *   func_info subsection
449 *
450 * The func_info subsection layout:
451 *   record size for struct bpf_func_info in the func_info subsection
452 *   struct btf_ext_info_sec for section #1
453 *   a list of bpf_func_info records for section #1
454 *     where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
455 *     but may not be identical
456 *   struct btf_ext_info_sec for section #2
457 *   a list of bpf_func_info records for section #2
458 *   ......
459 *
460 * Note that the bpf_func_info record size in .BTF.ext may not
461 * be the same as the one defined in include/uapi/linux/bpf.h.
462 * The loader should ensure that record_size meets minimum
463 * requirement and pass the record as is to the kernel. The
464 * kernel will handle the func_info properly based on its contents.
465 */
466struct btf_ext_header {
467	__u16	magic;
468	__u8	version;
469	__u8	flags;
470	__u32	hdr_len;
471
472	/* All offsets are in bytes relative to the end of this header */
473	__u32	func_info_off;
474	__u32	func_info_len;
475	__u32	line_info_off;
476	__u32	line_info_len;
477
478	/* optional part of .BTF.ext header */
479	__u32	core_relo_off;
480	__u32	core_relo_len;
481};
482
483struct btf_ext {
484	union {
485		struct btf_ext_header *hdr;
486		void *data;
487	};
488	void *data_swapped;
489	bool swapped_endian;
490	struct btf_ext_info func_info;
491	struct btf_ext_info line_info;
492	struct btf_ext_info core_relo_info;
493	__u32 data_size;
494};
495
496struct btf_ext_info_sec {
497	__u32	sec_name_off;
498	__u32	num_info;
499	/* Followed by num_info * record_size number of bytes */
500	__u8	data[];
501};
502
503/* The minimum bpf_func_info checked by the loader */
504struct bpf_func_info_min {
505	__u32   insn_off;
506	__u32   type_id;
507};
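
/* Illustrative sketch (not part of the original header): walking all
 * func_info records of a parsed .BTF.ext with the iteration macros above;
 * handle_func_info() is a placeholder for per-record processing.
 */
#if 0
	struct btf_ext_info *seg = &btf_ext->func_info;
	struct btf_ext_info_sec *sec;
	struct bpf_func_info_min *rec;
	__u32 i;

	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, i, rec)
			handle_func_info(rec->insn_off, rec->type_id);
	}
#endif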
508
509/* The minimum bpf_line_info checked by the loader */
510struct bpf_line_info_min {
511	__u32	insn_off;
512	__u32	file_name_off;
513	__u32	line_off;
514	__u32	line_col;
515};
516
517/* Functions to byte-swap info records */
518
519typedef void (*info_rec_bswap_fn)(void *);
520
521static inline void bpf_func_info_bswap(struct bpf_func_info *i)
522{
523	i->insn_off = bswap_32(i->insn_off);
524	i->type_id = bswap_32(i->type_id);
525}
526
527static inline void bpf_line_info_bswap(struct bpf_line_info *i)
528{
529	i->insn_off = bswap_32(i->insn_off);
530	i->file_name_off = bswap_32(i->file_name_off);
531	i->line_off = bswap_32(i->line_off);
532	i->line_col = bswap_32(i->line_col);
533}
534
535static inline void bpf_core_relo_bswap(struct bpf_core_relo *i)
536{
537	i->insn_off = bswap_32(i->insn_off);
538	i->type_id = bswap_32(i->type_id);
539	i->access_str_off = bswap_32(i->access_str_off);
540	i->kind = bswap_32(i->kind);
541}
542
543enum btf_field_iter_kind {
544	BTF_FIELD_ITER_IDS,
545	BTF_FIELD_ITER_STRS,
546};
547
548struct btf_field_desc {
549	/* once-per-type offsets */
550	int t_off_cnt, t_offs[2];
551	/* member struct size, or zero, if no members */
552	int m_sz;
553	/* repeated per-member offsets */
554	int m_off_cnt, m_offs[1];
555};
556
557struct btf_field_iter {
558	struct btf_field_desc desc;
559	void *p;
560	int m_idx;
561	int off_idx;
562	int vlen;
563};
564
565int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
566__u32 *btf_field_iter_next(struct btf_field_iter *it);
567
568typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
569typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
570int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
571int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
572__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
573				 __u32 kind);
574
575/* handle direct returned errors */
576static inline int libbpf_err(int ret)
577{
578	if (ret < 0)
579		errno = -ret;
580	return ret;
581}
582
583/* handle errno-based (e.g., syscall or libc) errors according to libbpf's
584 * strict mode settings
585 */
586static inline int libbpf_err_errno(int ret)
587{
588	/* errno is already assumed to be set on error */
589	return ret < 0 ? -errno : ret;
590}
591
592/* handle error for pointer-returning APIs, err is assumed to be < 0 always */
593static inline void *libbpf_err_ptr(int err)
594{
595	/* set errno on error, this doesn't break anything */
596	errno = -err;
597	return NULL;
598}
599
600/* handle pointer-returning APIs' error handling */
601static inline void *libbpf_ptr(void *ret)
602{
603	/* set errno on error, this doesn't break anything */
604	if (IS_ERR(ret))
605		errno = -PTR_ERR(ret);
606
607	return IS_ERR(ret) ? NULL : ret;
608}
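
/* Illustrative sketch (not part of the original header): the convention is
 * that internal helpers return negative error codes (or NULL/ERR_PTR), and
 * public entry points route results through the helpers above so that both
 * the return value and errno reflect the failure; internal_helper() is a
 * placeholder.
 */
#if 0
int public_api_example(int fd)
{
	int err = internal_helper(fd);	/* 0 on success, -Exxx on error */

	return libbpf_err(err);
}
#endif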
609
610static inline bool str_is_empty(const char *s)
611{
612	return !s || !s[0];
613}
614
615static inline bool is_ldimm64_insn(struct bpf_insn *insn)
616{
617	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
618}
619
620static inline void bpf_insn_bswap(struct bpf_insn *insn)
621{
622	__u8 tmp_reg = insn->dst_reg;
623
624	insn->dst_reg = insn->src_reg;
625	insn->src_reg = tmp_reg;
626	insn->off = bswap_16(insn->off);
627	insn->imm = bswap_32(insn->imm);
628}
629
630/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
631 * Original FD is not closed or altered in any other way.
632 * Preserves original FD value, if it's invalid (negative).
633 */
634static inline int dup_good_fd(int fd)
635{
636	if (fd < 0)
637		return fd;
638	return fcntl(fd, F_DUPFD_CLOEXEC, 3);
639}
640
641/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
642 * Takes ownership of the fd passed in, and closes it if calling
643 * fcntl(fd, F_DUPFD_CLOEXEC, 3).
644 */
645static inline int ensure_good_fd(int fd)
646{
647	int old_fd = fd, saved_errno;
648
649	if (fd < 0)
650		return fd;
651	if (fd < 3) {
652		fd = dup_good_fd(fd);
653		saved_errno = errno;
654		close(old_fd);
655		errno = saved_errno;
656		if (fd < 0) {
657			pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
658			errno = saved_errno;
659		}
660	}
661	return fd;
662}
663
664static inline int sys_dup3(int oldfd, int newfd, int flags)
665{
666	return syscall(__NR_dup3, oldfd, newfd, flags);
667}
668
669/* Point *fixed_fd* to the same file that *tmp_fd* points to.
670 * Regardless of success, *tmp_fd* is closed.
671 * Whatever *fixed_fd* pointed to is closed silently.
672 */
673static inline int reuse_fd(int fixed_fd, int tmp_fd)
674{
675	int err;
676
677	err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
678	err = err < 0 ? -errno : 0;
679	close(tmp_fd); /* clean up temporary FD */
680	return err;
681}
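
/* Illustrative usage sketch (not part of the original header): tmp_fd is a
 * freshly created FD, and the fixed FD (here a map's stable FD) keeps its
 * number so existing references remain valid; tmp_fd is consumed either way.
 */
#if 0
	err = reuse_fd(map->fd, tmp_fd);
	if (err)
		return err;
#endif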
682
683/* The following two functions are exposed to bpftool */
684int bpf_core_add_cands(struct bpf_core_cand *local_cand,
685		       size_t local_essent_len,
686		       const struct btf *targ_btf,
687		       const char *targ_btf_name,
688		       int targ_start_id,
689		       struct bpf_core_cand_list *cands);
690void bpf_core_free_cands(struct bpf_core_cand_list *cands);
691
692struct usdt_manager *usdt_manager_new(struct bpf_object *obj);
693void usdt_manager_free(struct usdt_manager *man);
694struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man,
695					   const struct bpf_program *prog,
696					   pid_t pid, const char *path,
697					   const char *usdt_provider, const char *usdt_name,
698					   __u64 usdt_cookie);
699
700static inline bool is_pow_of_2(size_t x)
701{
702	return x && (x & (x - 1)) == 0;
703}
704
705#define PROG_LOAD_ATTEMPTS 5
706int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
707
708bool glob_match(const char *str, const char *pat);
709
710long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name);
711long elf_find_func_offset_from_file(const char *binary_path, const char *name);
712
713struct elf_fd {
714	Elf *elf;
715	int fd;
716};
717
718int elf_open(const char *binary_path, struct elf_fd *elf_fd);
719void elf_close(struct elf_fd *elf_fd);
720
721int elf_resolve_syms_offsets(const char *binary_path, int cnt,
722			     const char **syms, unsigned long **poffsets,
723			     int st_type);
724int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
725				 unsigned long **poffsets, size_t *pcnt);
726
727int probe_fd(int fd);
728
729#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
Linux v5.14.15: earlier version of the same header (libbpf_internal.h)
  1/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
  2
  3/*
  4 * Internal libbpf helpers.
  5 *
  6 * Copyright (c) 2019 Facebook
  7 */
  8
  9#ifndef __LIBBPF_LIBBPF_INTERNAL_H
 10#define __LIBBPF_LIBBPF_INTERNAL_H
 11
 12#include <stdlib.h>
 13#include <limits.h>
 14#include <errno.h>
 15#include <linux/err.h>
 16#include "libbpf_legacy.h"
 17
 18/* make sure libbpf doesn't use kernel-only integer typedefs */
 19#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
 20
 21/* prevent accidental re-addition of reallocarray() */
 22#pragma GCC poison reallocarray
 23
 24#include "libbpf.h"
 25#include "btf.h"
 26
 27#ifndef EM_BPF
 28#define EM_BPF 247
 29#endif
 30
 31#ifndef R_BPF_64_64
 32#define R_BPF_64_64 1
 33#endif
 34#ifndef R_BPF_64_ABS64
 35#define R_BPF_64_ABS64 2
 36#endif
 37#ifndef R_BPF_64_ABS32
 38#define R_BPF_64_ABS32 3
 39#endif
 40#ifndef R_BPF_64_32
 41#define R_BPF_64_32 10
 42#endif
 43
 44#ifndef SHT_LLVM_ADDRSIG
 45#define SHT_LLVM_ADDRSIG 0x6FFF4C03
 46#endif
 47
 48/* if libelf is old and doesn't support mmap(), fall back to read() */
 49#ifndef ELF_C_READ_MMAP
 50#define ELF_C_READ_MMAP ELF_C_READ
 51#endif
 52
 53/* Older libelf all end up in this expression, for both 32 and 64 bit */
 54#ifndef GELF_ST_VISIBILITY
 55#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
 56#endif
 57
 58#define BTF_INFO_ENC(kind, kind_flag, vlen) \
 59	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
 60#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
 61#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
 62	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
 63#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
 64	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
 65	BTF_INT_ENC(encoding, bits_offset, bits)
 66#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
 67#define BTF_PARAM_ENC(name, type) (name), (type)
 68#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
 69#define BTF_TYPE_FLOAT_ENC(name, sz) \
 70	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
 71
 72#ifndef likely
 73#define likely(x) __builtin_expect(!!(x), 1)
 74#endif
 75#ifndef unlikely
 76#define unlikely(x) __builtin_expect(!!(x), 0)
 77#endif
 78#ifndef min
 79# define min(x, y) ((x) < (y) ? (x) : (y))
 80#endif
 81#ifndef max
 82# define max(x, y) ((x) < (y) ? (y) : (x))
 83#endif
 84#ifndef offsetofend
 85# define offsetofend(TYPE, FIELD) \
 86	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
 87#endif
 88
 89/* Symbol versioning is different between static and shared library.
 90 * Properly versioned symbols are needed for shared library, but
 91 * only the symbol of the new version is needed for static library.
 92 */
 93#ifdef SHARED
 94# define COMPAT_VERSION(internal_name, api_name, version) \
 95	asm(".symver " #internal_name "," #api_name "@" #version);
 96# define DEFAULT_VERSION(internal_name, api_name, version) \
 97	asm(".symver " #internal_name "," #api_name "@@" #version);
 98#else
 99# define COMPAT_VERSION(internal_name, api_name, version)
100# define DEFAULT_VERSION(internal_name, api_name, version) \
101	extern typeof(internal_name) api_name \
102	__attribute__((alias(#internal_name)));
103#endif
104
105extern void libbpf_print(enum libbpf_print_level level,
106			 const char *format, ...)
107	__attribute__((format(printf, 2, 3)));
108
109#define __pr(level, fmt, ...)	\
110do {				\
111	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__);	\
112} while (0)
113
114#define pr_warn(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
115#define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
116#define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
117
118#ifndef __has_builtin
119#define __has_builtin(x) 0
120#endif
121/*
122 * Re-implement glibc's reallocarray() for libbpf internal-only use.
123 * reallocarray(), unfortunately, is not available in all versions of glibc,
124 * so requires extra feature detection and using reallocarray() stub from
125 * <tools/libc_compat.h> and COMPAT_NEED_REALLOCARRAY. All this complicates
126 * build of libbpf unnecessarily and is just a maintenance burden. Instead,
127 * it's trivial to implement libbpf-specific internal version and use it
128 * throughout libbpf.
129 */
130static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
131{
132	size_t total;
133
134#if __has_builtin(__builtin_mul_overflow)
135	if (unlikely(__builtin_mul_overflow(nmemb, size, &total)))
136		return NULL;
137#else
138	if (size == 0 || nmemb > ULONG_MAX / size)
139		return NULL;
140	total = nmemb * size;
141#endif
142	return realloc(ptr, total);
143}
144
145struct btf;
146struct btf_type;
147
148struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id);
149const char *btf_kind_str(const struct btf_type *t);
150const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
151
152static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
153{
154	return (enum btf_func_linkage)(int)btf_vlen(t);
155}
156
157static inline __u32 btf_type_info(int kind, int vlen, int kflag)
158{
159	return (kflag << 31) | (kind << 24) | vlen;
160}
161
162enum map_def_parts {
163	MAP_DEF_MAP_TYPE	= 0x001,
164	MAP_DEF_KEY_TYPE	= 0x002,
165	MAP_DEF_KEY_SIZE	= 0x004,
166	MAP_DEF_VALUE_TYPE	= 0x008,
167	MAP_DEF_VALUE_SIZE	= 0x010,
168	MAP_DEF_MAX_ENTRIES	= 0x020,
169	MAP_DEF_MAP_FLAGS	= 0x040,
170	MAP_DEF_NUMA_NODE	= 0x080,
171	MAP_DEF_PINNING		= 0x100,
172	MAP_DEF_INNER_MAP	= 0x200,
173
174	MAP_DEF_ALL		= 0x3ff, /* combination of all above */
175};
176
177struct btf_map_def {
178	enum map_def_parts parts;
179	__u32 map_type;
180	__u32 key_type_id;
181	__u32 key_size;
182	__u32 value_type_id;
183	__u32 value_size;
184	__u32 max_entries;
185	__u32 map_flags;
186	__u32 numa_node;
187	__u32 pinning;
188};
189
190int parse_btf_map_def(const char *map_name, struct btf *btf,
191		      const struct btf_type *def_t, bool strict,
192		      struct btf_map_def *map_def, struct btf_map_def *inner_def);
193
194void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
195		     size_t cur_cnt, size_t max_cnt, size_t add_cnt);
196int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
197
198static inline bool libbpf_validate_opts(const char *opts,
199					size_t opts_sz, size_t user_sz,
200					const char *type_name)
201{
202	if (user_sz < sizeof(size_t)) {
203		pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
204		return false;
205	}
206	if (user_sz > opts_sz) {
207		size_t i;
208
209		for (i = opts_sz; i < user_sz; i++) {
210			if (opts[i]) {
211				pr_warn("%s has non-zero extra bytes\n",
212					type_name);
213				return false;
214			}
215		}
216	}
217	return true;
218}
219
220#define OPTS_VALID(opts, type)						      \
221	(!(opts) || libbpf_validate_opts((const char *)opts,		      \
222					 offsetofend(struct type,	      \
223						     type##__last_field),     \
224					 (opts)->sz, #type))
225#define OPTS_HAS(opts, field) \
226	((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
227#define OPTS_GET(opts, field, fallback_value) \
228	(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
229#define OPTS_SET(opts, field, value)		\
230	do {					\
231		if (OPTS_HAS(opts, field))	\
232			(opts)->field = value;	\
233	} while (0)
234
235int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
236int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
237int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
238			 const char *str_sec, size_t str_len);
239
240struct bpf_prog_load_params {
241	enum bpf_prog_type prog_type;
242	enum bpf_attach_type expected_attach_type;
243	const char *name;
244	const struct bpf_insn *insns;
245	size_t insn_cnt;
246	const char *license;
247	__u32 kern_version;
248	__u32 attach_prog_fd;
249	__u32 attach_btf_obj_fd;
250	__u32 attach_btf_id;
251	__u32 prog_ifindex;
252	__u32 prog_btf_fd;
253	__u32 prog_flags;
254
255	__u32 func_info_rec_size;
256	const void *func_info;
257	__u32 func_info_cnt;
258
259	__u32 line_info_rec_size;
260	const void *line_info;
261	__u32 line_info_cnt;
262
263	__u32 log_level;
264	char *log_buf;
265	size_t log_buf_sz;
266};
267
268int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);
269
270int bpf_object__section_size(const struct bpf_object *obj, const char *name,
271			     __u32 *size);
272int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
273				__u32 *off);
274struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
275void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
276				const char **prefix, int *kind);
277
278struct btf_ext_info {
279	/*
280	 * info points to the individual info section (e.g. func_info and
281	 * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
282	 */
283	void *info;
284	__u32 rec_size;
285	__u32 len;
286};
287
288#define for_each_btf_ext_sec(seg, sec)					\
289	for (sec = (seg)->info;						\
290	     (void *)sec < (seg)->info + (seg)->len;			\
291	     sec = (void *)sec + sizeof(struct btf_ext_info_sec) +	\
292		   (seg)->rec_size * sec->num_info)
293
294#define for_each_btf_ext_rec(seg, sec, i, rec)				\
295	for (i = 0, rec = (void *)&(sec)->data;				\
296	     i < (sec)->num_info;					\
297	     i++, rec = (void *)rec + (seg)->rec_size)
298
299/*
300 * The .BTF.ext ELF section layout defined as
301 *   struct btf_ext_header
302 *   func_info subsection
303 *
304 * The func_info subsection layout:
305 *   record size for struct bpf_func_info in the func_info subsection
306 *   struct btf_sec_func_info for section #1
307 *   a list of bpf_func_info records for section #1
308 *     where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
309 *     but may not be identical
310 *   struct btf_sec_func_info for section #2
311 *   a list of bpf_func_info records for section #2
312 *   ......
313 *
314 * Note that the bpf_func_info record size in .BTF.ext may not
315 * be the same as the one defined in include/uapi/linux/bpf.h.
316 * The loader should ensure that record_size meets minimum
317 * requirement and pass the record as is to the kernel. The
318 * kernel will handle the func_info properly based on its contents.
319 */
320struct btf_ext_header {
321	__u16	magic;
322	__u8	version;
323	__u8	flags;
324	__u32	hdr_len;
325
326	/* All offsets are in bytes relative to the end of this header */
327	__u32	func_info_off;
328	__u32	func_info_len;
329	__u32	line_info_off;
330	__u32	line_info_len;
331
332	/* optional part of .BTF.ext header */
333	__u32	core_relo_off;
334	__u32	core_relo_len;
335};
336
337struct btf_ext {
338	union {
339		struct btf_ext_header *hdr;
340		void *data;
341	};
342	struct btf_ext_info func_info;
343	struct btf_ext_info line_info;
344	struct btf_ext_info core_relo_info;
345	__u32 data_size;
346};
347
348struct btf_ext_info_sec {
349	__u32	sec_name_off;
350	__u32	num_info;
351	/* Followed by num_info * record_size number of bytes */
352	__u8	data[];
353};
354
355/* The minimum bpf_func_info checked by the loader */
356struct bpf_func_info_min {
357	__u32   insn_off;
358	__u32   type_id;
359};
360
361/* The minimum bpf_line_info checked by the loader */
362struct bpf_line_info_min {
363	__u32	insn_off;
364	__u32	file_name_off;
365	__u32	line_off;
366	__u32	line_col;
367};
368
369/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
370 * has to be adjusted by relocations.
371 */
372enum bpf_core_relo_kind {
373	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
374	BPF_FIELD_BYTE_SIZE = 1,	/* field size in bytes */
375	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
376	BPF_FIELD_SIGNED = 3,		/* field signedness (0 - unsigned, 1 - signed) */
377	BPF_FIELD_LSHIFT_U64 = 4,	/* bitfield-specific left bitshift */
378	BPF_FIELD_RSHIFT_U64 = 5,	/* bitfield-specific right bitshift */
379	BPF_TYPE_ID_LOCAL = 6,		/* type ID in local BPF object */
380	BPF_TYPE_ID_TARGET = 7,		/* type ID in target kernel */
381	BPF_TYPE_EXISTS = 8,		/* type existence in target kernel */
382	BPF_TYPE_SIZE = 9,		/* type size in bytes */
383	BPF_ENUMVAL_EXISTS = 10,	/* enum value existence in target kernel */
384	BPF_ENUMVAL_VALUE = 11,		/* enum value integer value */
385};
386
387/* The minimum bpf_core_relo checked by the loader
388 *
389 * CO-RE relocation captures the following data:
390 * - insn_off - instruction offset (in bytes) within a BPF program that needs
391 *   its insn->imm field to be relocated with actual field info;
392 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
393 *   type or field;
394 * - access_str_off - offset into corresponding .BTF string section. String
395 *   interpretation depends on specific relocation kind:
396 *     - for field-based relocations, string encodes an accessed field using
397 *     a sequence of field and array indices, separated by colon (:). It's
398 *     conceptually very close to LLVM's getelementptr ([0]) instruction's
399 *     arguments for identifying offset to a field.
400 *     - for type-based relocations, strings is expected to be just "0";
401 *     - for enum value-based relocations, string contains an index of enum
402 *     value within its enum type;
403 *
404 * Example to provide a better feel.
405 *
406 *   struct sample {
407 *       int a;
408 *       struct {
409 *           int b[10];
410 *       };
411 *   };
412 *
413 *   struct sample *s = ...;
414 *   int x = &s->a;     // encoded as "0:0" (a is field #0)
415 *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1, 
416 *                      // b is field #0 inside anon struct, accessing elem #5)
417 *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
418 *
419 * type_id for all relocs in this example  will capture BTF type id of
420 * `struct sample`.
421 *
422 * Such relocation is emitted when using __builtin_preserve_access_index()
423 * Clang built-in, passing expression that captures field address, e.g.:
424 *
425 * bpf_probe_read(&dst, sizeof(dst),
426 *		  __builtin_preserve_access_index(&src->a.b.c));
427 *
428 * In this case Clang will emit field relocation recording necessary data to
429 * be able to find offset of embedded `a.b.c` field within `src` struct.
430 *
431 *   [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
432 */
433struct bpf_core_relo {
434	__u32   insn_off;
435	__u32   type_id;
436	__u32   access_str_off;
437	enum bpf_core_relo_kind kind;
438};
439
440typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
441typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
442int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
443int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
444int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
445int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
446
447extern enum libbpf_strict_mode libbpf_mode;
448
449/* handle direct returned errors */
450static inline int libbpf_err(int ret)
451{
452	if (ret < 0)
453		errno = -ret;
454	return ret;
455}
456
457/* handle errno-based (e.g., syscall or libc) errors according to libbpf's
458 * strict mode settings
459 */
460static inline int libbpf_err_errno(int ret)
461{
462	if (libbpf_mode & LIBBPF_STRICT_DIRECT_ERRS)
463		/* errno is already assumed to be set on error */
464		return ret < 0 ? -errno : ret;
465
466	/* legacy: on error return -1 directly and don't touch errno */
467	return ret;
468}
469
470/* handle error for pointer-returning APIs, err is assumed to be < 0 always */
471static inline void *libbpf_err_ptr(int err)
472{
473	/* set errno on error, this doesn't break anything */
474	errno = -err;
475
476	if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
477		return NULL;
478
479	/* legacy: encode err as ptr */
480	return ERR_PTR(err);
481}
482
483/* handle pointer-returning APIs' error handling */
484static inline void *libbpf_ptr(void *ret)
485{
486	/* set errno on error, this doesn't break anything */
487	if (IS_ERR(ret))
488		errno = -PTR_ERR(ret);
489
490	if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
491		return IS_ERR(ret) ? NULL : ret;
492
493	/* legacy: pass-through original pointer */
494	return ret;
495}
496
497#endif /* __LIBBPF_LIBBPF_INTERNAL_H */