// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "zip.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
static int map_set_def_max_entries(struct bpf_map *map);

static const char * const attach_type_name[] = {
	[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
	[BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
	[BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
	[BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
	[BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
	[BPF_CGROUP_DEVICE] = "cgroup_device",
	[BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
	[BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
	[BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
	[BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
	[BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect",
	[BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
	[BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
	[BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
	[BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
	[BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername",
	[BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
	[BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
	[BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname",
	[BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
	[BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
	[BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg",
	[BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
	[BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
	[BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
	[BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg",
	[BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
	[BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
	[BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
	[BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
	[BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
	[BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
	[BPF_LIRC_MODE2] = "lirc_mode2",
	[BPF_FLOW_DISSECTOR] = "flow_dissector",
	[BPF_TRACE_RAW_TP] = "trace_raw_tp",
	[BPF_TRACE_FENTRY] = "trace_fentry",
	[BPF_TRACE_FEXIT] = "trace_fexit",
	[BPF_MODIFY_RETURN] = "modify_return",
	[BPF_LSM_MAC] = "lsm_mac",
	[BPF_LSM_CGROUP] = "lsm_cgroup",
	[BPF_SK_LOOKUP] = "sk_lookup",
	[BPF_TRACE_ITER] = "trace_iter",
	[BPF_XDP_DEVMAP] = "xdp_devmap",
	[BPF_XDP_CPUMAP] = "xdp_cpumap",
	[BPF_XDP] = "xdp",
	[BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
	[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
	[BPF_PERF_EVENT] = "perf_event",
	[BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
	[BPF_STRUCT_OPS] = "struct_ops",
	[BPF_NETFILTER] = "netfilter",
	[BPF_TCX_INGRESS] = "tcx_ingress",
	[BPF_TCX_EGRESS] = "tcx_egress",
	[BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi",
	[BPF_NETKIT_PRIMARY] = "netkit_primary",
	[BPF_NETKIT_PEER] = "netkit_peer",
};

static const char * const link_type_name[] = {
	[BPF_LINK_TYPE_UNSPEC] = "unspec",
	[BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
	[BPF_LINK_TYPE_TRACING] = "tracing",
	[BPF_LINK_TYPE_CGROUP] = "cgroup",
	[BPF_LINK_TYPE_ITER] = "iter",
	[BPF_LINK_TYPE_NETNS] = "netns",
	[BPF_LINK_TYPE_XDP] = "xdp",
	[BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
	[BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
	[BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
	[BPF_LINK_TYPE_NETFILTER] = "netfilter",
	[BPF_LINK_TYPE_TCX] = "tcx",
	[BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi",
	[BPF_LINK_TYPE_NETKIT] = "netkit",
};

static const char * const map_type_name[] = {
	[BPF_MAP_TYPE_UNSPEC] = "unspec",
	[BPF_MAP_TYPE_HASH] = "hash",
	[BPF_MAP_TYPE_ARRAY] = "array",
	[BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
	[BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
	[BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
	[BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
	[BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
	[BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
	[BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
	[BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
	[BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
	[BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
	[BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
	[BPF_MAP_TYPE_DEVMAP] = "devmap",
	[BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
	[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
	[BPF_MAP_TYPE_CPUMAP] = "cpumap",
	[BPF_MAP_TYPE_XSKMAP] = "xskmap",
	[BPF_MAP_TYPE_SOCKHASH] = "sockhash",
	[BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
	[BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
	[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
	[BPF_MAP_TYPE_QUEUE] = "queue",
	[BPF_MAP_TYPE_STACK] = "stack",
	[BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
	[BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
	[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
	[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
	[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
	[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
	[BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
	[BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
	[BPF_MAP_TYPE_ARENA] = "arena",
};

static const char * const prog_type_name[] = {
	[BPF_PROG_TYPE_UNSPEC] = "unspec",
	[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
	[BPF_PROG_TYPE_KPROBE] = "kprobe",
	[BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
	[BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
	[BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
	[BPF_PROG_TYPE_XDP] = "xdp",
	[BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
	[BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
	[BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
	[BPF_PROG_TYPE_LWT_IN] = "lwt_in",
	[BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
	[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
	[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
	[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
	[BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
	[BPF_PROG_TYPE_SK_MSG] = "sk_msg",
	[BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
	[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
	[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
	[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
	[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
	[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
	[BPF_PROG_TYPE_TRACING] = "tracing",
	[BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
	[BPF_PROG_TYPE_EXT] = "ext",
	[BPF_PROG_TYPE_LSM] = "lsm",
	[BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
	[BPF_PROG_TYPE_SYSCALL] = "syscall",
	[BPF_PROG_TYPE_NETFILTER] = "netfilter",
};

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn;

	old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);

	return old_print_fn;
}
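
/* Example (illustrative, user code, not part of libbpf): a typical way to
 * route libbpf logs into an application's own logging. my_print() is a
 * hypothetical user-provided callback:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */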

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;
	int old_errno;
	libbpf_print_fn_t print_fn;

	print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
	if (!print_fn)
		return;

	old_errno = errno;

	va_start(args, format);
	/* call the atomically fetched callback, not the global variable,
	 * to avoid racing with a concurrent libbpf_set_print()
	 */
	print_fn(level, format, args);
	va_end(args);

	errno = old_errno;
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE 128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

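/* Kernel UAPI passes pointers as __u64. The double cast goes through
 * unsigned long so that on 32-bit architectures the pointer is
 * zero-extended rather than sign-extended.
 */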
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
	/* as of v1.0 libbpf_set_strict_mode() is a no-op */
	return 0;
}

__u32 libbpf_major_version(void)
{
	return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
	return LIBBPF_MINOR_VERSION;
}

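/* Two-level stringification is needed so macro arguments are expanded
 * before being turned into string literals. Illustratively, assuming
 * LIBBPF_MAJOR_VERSION is 1 and LIBBPF_MINOR_VERSION is 4, _S(...) yields
 * "1" and "4", producing "v1.4"; a single-level #X would instead yield the
 * literal string "LIBBPF_MAJOR_VERSION".
 */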
const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
	return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN_LD64,
	RELO_EXTERN_CALL,
	RELO_SUBPROG_ADDR,
	RELO_CORE,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	union {
		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
		struct {
			int map_idx;
			int sym_off;
			int ext_idx;
		};
	};
};

/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
	SEC_NONE = 0,
	/* expected_attach_type is optional if the kernel doesn't support it */
	SEC_EXP_ATTACH_OPT = 1,
	/* legacy, only used by libbpf_get_type_names() and
	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
	 * This used to be associated with cgroup (and a few other) BPF
	 * programs that were attachable through the BPF_PROG_ATTACH command.
	 * Pretty meaningless nowadays, though.
	 */
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	/* attachment target is specified through BTF ID in either kernel or
	 * other BPF program's BTF object
	 */
	SEC_ATTACH_BTF = 4,
	/* BPF program type allows sleeping/blocking in kernel */
	SEC_SLEEPABLE = 8,
	/* BPF program supports non-linear XDP buffer */
	SEC_XDP_FRAGS = 16,
	/* Setup proper attach type for usdt probes. */
	SEC_USDT = 32,
};

struct bpf_sec_def {
	char *sec;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	long cookie;
	int handler_id;

	libbpf_prog_setup_fn_t prog_setup_fn;
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	libbpf_prog_attach_fn_t prog_attach_fn;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	char *name;
	char *sec_name;
	size_t sec_idx;
	const struct bpf_sec_def *sec_def;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each main BPF
	 * program is processed and relocated and is used to determine
	 * whether the sub-program was already appended to the main program,
	 * and if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;

	/* BPF verifier log settings */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	struct bpf_object *obj;

	int fd;
	bool autoload;
	bool autoattach;
	bool sym_global;
	bool mark_btf_static;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int exception_cb_idx;

	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;

	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
#define ARENA_SEC ".addr_space.1"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

struct bpf_map {
	struct bpf_object *obj;
	char *name;
	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves their original ELF section
	 * name. This is important to be able to find corresponding BTF
	 * DATASEC information.
	 */
	char *real_name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	int mod_btf_fd;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
	bool autocreate;
	__u64 map_extra;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	char *essent_name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
			/* BTF fd index to be patched in for insn->off, this is
			 * 0 for vmlinux BTF, index in obj->fd_array for module
			 * BTF
			 */
			__s16 btf_fd_idx;
		} ksym;
	};
};

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
	int fd_array_idx;
};

enum sec_type {
	SEC_UNUSED = 0,
	SEC_RELO,
	SEC_BSS,
	SEC_DATA,
	SEC_RODATA,
	SEC_ST_OPS,
};

struct elf_sec_desc {
	enum sec_type sec_type;
	Elf64_Shdr *shdr;
	Elf_Data *data;
};

struct elf_state {
	int fd;
	const void *obj_buf;
	size_t obj_buf_sz;
	Elf *elf;
	Elf64_Ehdr *ehdr;
	Elf_Data *symbols;
	Elf_Data *arena_data;
	size_t shstrndx; /* section index for section name strings */
	size_t strtabidx;
	struct elf_sec_desc *secs;
	size_t sec_cnt;
	int btf_maps_shndx;
	__u32 btf_maps_sec_btf_id;
	int text_shndx;
	int symbols_shndx;
	bool has_st_ops;
	int arena_data_shndx;
};

struct usdt_manager;

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;

	bool loaded;
	bool has_subcalls;
	bool has_rodata;

	struct bpf_gen *gen_loader;

	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
	struct elf_state efile;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
	 * override for vmlinux BTF.
	 */
	char *btf_custom_path;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	int *fd_array;
	size_t fd_array_cap;
	size_t fd_array_cnt;

	struct usdt_manager *usdt_man;

	struct bpf_map *arena_map;
	void *arena_data;
	size_t arena_data_sz;

	struct kern_feature_cache *feat_cache;
	char *token_path;
	int token_fd;

	char path[];
};

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);

void bpf_program__unload(struct bpf_program *prog)
{
	if (!prog)
		return;

	zclose(prog->fd);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

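/* A BPF-to-BPF subprogram call is a JMP|CALL instruction whose src_reg is
 * BPF_PSEUDO_CALL; plain helper calls use src_reg == 0 (and kfunc calls use
 * BPF_PSEUDO_KFUNC_CALL), which is what the checks below distinguish.
 */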
static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static bool is_call_insn(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->fd = -1;
	prog->exception_cb_idx = -1;

	/* libbpf's convention for SEC("?abc...") is that it's just like
	 * SEC("abc...") but the corresponding bpf_program starts out with
	 * autoload set to false.
	 */
	if (sec_name[0] == '?') {
		prog->autoload = false;
		/* from now on forget there was ? in section name */
		sec_name++;
	} else {
		prog->autoload = true;
	}

	prog->autoattach = true;

	/* inherit object's log_level */
	prog->log_level = obj->log_level;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
	int nr_progs, err, i;
	const char *name;
	Elf64_Sym *sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);

	for (i = 0; i < nr_syms; i++) {
		sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != sec_idx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		prog_sz = sym->st_size;
		sec_off = sym->st_value;

		name = elf_sym_str(obj, sym->st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
			return -ENOTSUP;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs is still
			 * valid, so no special treatment is needed in
			 * bpf_object__close().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
			prog->sym_global = true;

		/* if function is a global/weak symbol, but has restricted
		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
		 * as static to enable more permissive BPF verification mode
		 * with more outside context available to BPF verifier
		 */
		if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
		    || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
			prog->mark_btf_static = true;

		nr_progs++;
		obj->nr_programs = nr_progs;
	}

	return 0;
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
			    __u16 kind, struct btf **res_btf,
			    struct module_btf **res_mod_btf);

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

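/* Given a struct_ops type name from the BPF object's BTF (e.g., assuming a
 * map of type "struct tcp_congestion_ops"), look up both the kernel-side
 * struct of the same name and its "bpf_struct_ops_<name>" map-value wrapper
 * type, searching vmlinux BTF and kernel module BTFs.
 */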
static int
find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
			   struct module_btf **mod_btf,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	struct btf *btf;
	__s32 kern_vtype_id, kern_type_id;
	char tname[256];
	__u32 i;

	snprintf(tname, sizeof(tname), "%.*s",
		 (int)bpf_core_essential_name_len(tname_raw), tname_raw);

	kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
					&btf, mod_btf);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

static bool is_valid_st_ops_program(struct bpf_object *obj,
				    const struct bpf_program *prog)
{
	int i;

	for (i = 0; i < obj->nr_programs; i++) {
		if (&obj->programs[i] == prog)
			return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
	}

	return false;
}

/* For each struct_ops program P, referenced from some struct_ops map M,
 * enable P.autoload if there are Ms for which M.autocreate is true,
 * disable P.autoload if for all Ms M.autocreate is false.
 * Don't change P.autoload for programs that are not referenced from any maps.
 */
static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
{
	struct bpf_program *prog, *slot_prog;
	struct bpf_map *map;
	int i, j, k, vlen;

	for (i = 0; i < obj->nr_programs; ++i) {
		int should_load = false;
		int use_cnt = 0;

		prog = &obj->programs[i];
		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
			continue;

		for (j = 0; j < obj->nr_maps; ++j) {
			map = &obj->maps[j];
			if (!bpf_map__is_struct_ops(map))
				continue;

			vlen = btf_vlen(map->st_ops->type);
			for (k = 0; k < vlen; ++k) {
				slot_prog = map->st_ops->progs[k];
				if (prog != slot_prog)
					continue;

				use_cnt++;
				if (map->autocreate)
					should_load = true;
			}
		}
		if (use_cnt)
			prog->autoload = should_load;
	}

	return 0;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_object *obj = map->obj;
	const struct btf *btf = obj->btf;
	struct bpf_struct_ops *st_ops;
	const struct btf *kern_btf;
	struct module_btf *mod_btf;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(obj, tname, &mod_btf,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			/* Update the value from the shadow type */
			prog = *(void **)mdata;
			st_ops->progs[i] = prog;
			if (!prog)
				continue;
			if (!is_valid_st_ops_program(obj, prog)) {
				pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
					map->name, mname);
				return -ENOTSUP;
			}

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			if (mod_btf)
				prog->attach_btf_obj_fd = mod_btf->fd;

			/* if we haven't yet processed this BPF program, record proper
			 * attach_btf_id and member_idx
			 */
			if (!prog->attach_btf_id) {
				prog->attach_btf_id = kern_type_id;
				prog->expected_attach_type = kern_member_idx;
			}

			/* struct_ops BPF prog can be re-used between multiple
			 * .struct_ops & .struct_ops.link as long as it's the
			 * same struct_ops struct definition and the same
			 * function pointer field
			 */
			if (prog->attach_btf_id != kern_type_id) {
				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
					map->name, mname, prog->name, prog->sec_name, prog->type,
					prog->attach_btf_id, kern_type_id);
				return -EINVAL;
			}
			if (prog->expected_attach_type != kern_member_idx) {
				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
					map->name, mname, prog->name, prog->sec_name, prog->type,
					prog->expected_attach_type, kern_member_idx);
				return -EINVAL;
			}

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		if (!map->autocreate)
			continue;

		err = bpf_map__init_kern_struct_ops(map);
		if (err)
			return err;
	}

	return 0;
}

static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
				int shndx, Elf_Data *data)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, sec_name,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			sec_name);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, sec_name);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;
		map->btf_value_type_id = type_id;

		/* Follow same convention as for programs autoload:
		 * SEC("?.struct_ops") means map is not created by default.
		 */
		if (sec_name[0] == '?') {
			map->autocreate = false;
			/* from now on forget there was ? in section name */
			sec_name++;
		}

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;
		map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, sec_name);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
	const char *sec_name;
	int sec_idx, err;

	for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
		struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];

		if (desc->sec_type != SEC_ST_OPS)
			continue;

		sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
		if (!sec_name)
			return -LIBBPF_ERRNO__FORMAT;

		err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
		if (err)
			return err;
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call bpf_object__elf_finish()
	 * after data collection to return obj_buf to the user. Otherwise the
	 * buffer would have to be duplicated to avoid the user freeing it
	 * before ELF processing is finished.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.btf_maps_shndx = -1;
	obj->kconfig_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj->efile.elf)
		return;

	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;
	obj->efile.symbols = NULL;
	obj->efile.arena_data = NULL;

	zfree(&obj->efile.secs);
	obj->efile.sec_cnt = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	Elf64_Ehdr *ehdr;
	int err = 0;
	Elf *elf;

	if (obj->efile.elf) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/* obj_buf should have been validated by bpf_object__open_mem(). */
		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	obj->efile.elf = elf;

	if (elf_kind(elf) != ELF_K_ELF) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
		goto errout;
	}

	if (gelf_getclass(elf) != ELFCLASS64) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
		goto errout;
	}

	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
	if (!obj->efile.ehdr) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	if (!data) {
		pr_warn("invalid license section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
	 * go over allowed ELF data section buffer
	 */
	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (!data || size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
	Elf_Data *data;
	Elf_Scn *scn;

	if (!name)
		return -EINVAL;

	scn = elf_sec_by_name(obj, name);
	data = elf_sec_data(obj, scn);
	if (data) {
		*size = data->d_size;
		return 0; /* found it */
	}

	return -ENOENT;
}

static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, si);

		if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;

		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
			continue;

		sname = elf_sym_str(obj, sym->st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n", name);
			return ERR_PTR(-EIO);
		}
		if (strcmp(name, sname) == 0)
			return sym;
	}

	return ERR_PTR(-ENOENT);
}

/* Some versions of Android don't provide memfd_create() in their libc
 * implementation, so avoid complications and just go straight to Linux
 * syscall.
 */
static int sys_memfd_create(const char *name, unsigned flags)
{
	return syscall(__NR_memfd_create, name, flags);
}

#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif

static int create_placeholder_fd(void)
{
	int fd;

	fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
	if (fd < 0)
		return -errno;
	return fd;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
				sizeof(*obj->maps), obj->nr_maps + 1);
	if (err)
		return ERR_PTR(err);

	map = &obj->maps[obj->nr_maps++];
	map->obj = obj;
	/* Preallocate map FD without actually creating BPF map just yet.
	 * These map FD "placeholders" will be reused later without changing
	 * FD value when map is actually created in the kernel.
	 *
	 * This is useful to be able to perform BPF program relocations
	 * without having to create BPF maps before that step. This allows us
	 * to finalize and load BTF very late in BPF object's loading phase,
	 * right before BPF maps have to be created and BPF programs have to
	 * be loaded. By having these map FD placeholders we can perform all
	 * the sanitizations, relocations, and any other adjustments before we
	 * start creating actual BPF kernel objects (BTF, maps, progs).
	 */
	map->fd = create_placeholder_fd();
	if (map->fd < 0)
		return ERR_PTR(map->fd);
	map->inner_map_fd = -1;
	map->autocreate = true;

	return map;
}

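/* Worked example (illustrative, assuming 4 KiB pages): value_sz = 4 and
 * max_entries = 1000 gives roundup(4, 8) * 1000 = 8000 bytes, which is then
 * rounded up to the page size, i.e. 8192 bytes of mmap()-able space.
 */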
static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
{
	const long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(value_sz, 8) * max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	const long page_sz = sysconf(_SC_PAGE_SIZE);

	switch (map->def.type) {
	case BPF_MAP_TYPE_ARRAY:
		return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
	case BPF_MAP_TYPE_ARENA:
		return page_sz * map->def.max_entries;
	default:
		return 0; /* not supported */
	}
}

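/* Remap an internal map's memory region to a new size, preserving as much of
 * the old contents as fits; this can shrink as well as grow (used, e.g.,
 * when a not-yet-loaded map's value size is changed through the bpf_map
 * setters).
 */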
static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
{
	void *mmaped;

	if (!map->mmaped)
		return -EINVAL;

	if (old_sz == new_sz)
		return 0;

	mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mmaped == MAP_FAILED)
		return -errno;

	memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
	munmap(map->mmaped, old_sz);
	map->mmaped = mmaped;
	return 0;
}

static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));

	/* This is one of the more confusing parts of libbpf for various
	 * reasons, some of which are historical. The original idea for naming
	 * internal names was to include as much of BPF object name prefix as
	 * possible, so that it can be distinguished from similar internal
	 * maps of a different BPF object.
	 * As an example, let's say we have bpf_object named 'my_object_name'
	 * and internal map corresponding to '.rodata' ELF section. The final
	 * map name advertised to user and to the kernel will be
	 * 'my_objec.rodata', taking first 8 characters of object name and
	 * entire 7 characters of '.rodata'.
	 * Somewhat confusingly, if internal map ELF section name is shorter
	 * than 7 characters, e.g., '.bss', we still reserve 7 characters
	 * for the suffix, even though we only have 4 actual characters, and
	 * resulting map will be called 'my_objec.bss', not even using all 15
	 * characters allowed by the kernel. Oh well, at least the truncated
	 * object name is somewhat consistent in this case. But if the map
	 * name is '.kconfig', we'll still have entirety of '.kconfig' added
	 * (8 chars) and thus will be left with only first 7 characters of the
	 * object name ('my_obje'). Happy guessing, user, that the final map
	 * name will be "my_obje.kconfig".
	 * Now, with libbpf starting to support arbitrarily named .rodata.*
	 * and .data.* data sections, it's possible that ELF section name is
	 * longer than allowed 15 chars, so we now need to be careful to take
	 * only up to 15 first characters of ELF name, taking no BPF object
	 * name characters at all. So '.rodata.abracadabra' will result in
	 * '.rodata.abracad' kernel and user-visible name.
	 * We need to keep this convoluted logic intact for .data, .bss and
	 * .rodata maps, but for new custom .data.custom and .rodata.custom
	 * maps we use their ELF names as is, not prepending bpf_object name
	 * in front. We still need to truncate them to 15 characters for the
	 * kernel. Full name can be recovered for such maps by using DATASEC
	 * BTF type associated with such map's value type, though.
	 */
	if (sfx_len >= BPF_OBJ_NAME_LEN)
		sfx_len = BPF_OBJ_NAME_LEN - 1;

	/* if there are two or more dots in map name, it's a custom dot map */
	if (strchr(real_name + 1, '.') != NULL)
		pfx_len = 0;
	else
		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, real_name);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}

static int
map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);

/* An internal BPF map is mmap()'able only if at least one of the
 * corresponding DATASEC's VARs is to be exposed through the BPF skeleton,
 * i.e., it is a GLOBAL variable and is not marked as __hidden (which turns
 * it into, effectively, a STATIC variable).
 */
1810static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1811{
1812 const struct btf_type *t, *vt;
1813 struct btf_var_secinfo *vsi;
1814 int i, n;
1815
1816 if (!map->btf_value_type_id)
1817 return false;
1818
1819 t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1820 if (!btf_is_datasec(t))
1821 return false;
1822
1823 vsi = btf_var_secinfos(t);
1824 for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
1825 vt = btf__type_by_id(obj->btf, vsi->type);
1826 if (!btf_is_var(vt))
1827 continue;
1828
1829 if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1830 return true;
1831 }
1832
1833 return false;
1834}
1835
1836static int
1837bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1838 const char *real_name, int sec_idx, void *data, size_t data_sz)
1839{
1840 struct bpf_map_def *def;
1841 struct bpf_map *map;
1842 size_t mmap_sz;
1843 int err;
1844
1845 map = bpf_object__add_map(obj);
1846 if (IS_ERR(map))
1847 return PTR_ERR(map);
1848
1849 map->libbpf_type = type;
1850 map->sec_idx = sec_idx;
1851 map->sec_offset = 0;
1852 map->real_name = strdup(real_name);
1853 map->name = internal_map_name(obj, real_name);
1854 if (!map->real_name || !map->name) {
1855 zfree(&map->real_name);
1856 zfree(&map->name);
1857 return -ENOMEM;
1858 }
1859
1860 def = &map->def;
1861 def->type = BPF_MAP_TYPE_ARRAY;
1862 def->key_size = sizeof(int);
1863 def->value_size = data_sz;
1864 def->max_entries = 1;
1865 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1866 ? BPF_F_RDONLY_PROG : 0;
1867
1868 /* failures are fine because of maps like .rodata.str1.1 */
1869 (void) map_fill_btf_type_info(obj, map);
1870
1871 if (map_is_mmapable(obj, map))
1872 def->map_flags |= BPF_F_MMAPABLE;
1873
1874 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1875 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1876
1877 mmap_sz = bpf_map_mmap_sz(map);
1878 map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1879 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1880 if (map->mmaped == MAP_FAILED) {
1881 err = -errno;
1882 map->mmaped = NULL;
1883 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1884 map->name, err);
1885 zfree(&map->real_name);
1886 zfree(&map->name);
1887 return err;
1888 }
1889
1890 if (data)
1891 memcpy(map->mmaped, data, data_sz);
1892
1893 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1894 return 0;
1895}
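
/* E.g., a 16-byte .rodata section ends up as a single-entry ARRAY map:
 * key_size == 4, value_size == 16, max_entries == 1, with BPF_F_RDONLY_PROG
 * set (plus BPF_F_MMAPABLE if any of its VARs is skeleton-visible).
 */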
1896
1897static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1898{
1899 struct elf_sec_desc *sec_desc;
1900 const char *sec_name;
1901 int err = 0, sec_idx;
1902
1903 /*
1904 * Populate obj->maps with libbpf internal maps.
1905 */
1906 for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1907 sec_desc = &obj->efile.secs[sec_idx];
1908
1909 /* Skip recognized sections with size 0. */
1910 if (!sec_desc->data || sec_desc->data->d_size == 0)
1911 continue;
1912
1913 switch (sec_desc->sec_type) {
1914 case SEC_DATA:
1915 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1916 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1917 sec_name, sec_idx,
1918 sec_desc->data->d_buf,
1919 sec_desc->data->d_size);
1920 break;
1921 case SEC_RODATA:
1922 obj->has_rodata = true;
1923 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1924 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1925 sec_name, sec_idx,
1926 sec_desc->data->d_buf,
1927 sec_desc->data->d_size);
1928 break;
1929 case SEC_BSS:
1930 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1931 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1932 sec_name, sec_idx,
1933 NULL,
1934 sec_desc->data->d_size);
1935 break;
1936 default:
1937 /* skip */
1938 break;
1939 }
1940 if (err)
1941 return err;
1942 }
1943 return 0;
1944}
1945
1946
1947static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1948 const void *name)
1949{
1950 int i;
1951
1952 for (i = 0; i < obj->nr_extern; i++) {
1953 if (strcmp(obj->externs[i].name, name) == 0)
1954 return &obj->externs[i];
1955 }
1956 return NULL;
1957}
1958
1959static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1960 char value)
1961{
1962 switch (ext->kcfg.type) {
1963 case KCFG_BOOL:
1964 if (value == 'm') {
1965 pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
1966 ext->name, value);
1967 return -EINVAL;
1968 }
1969 *(bool *)ext_val = value == 'y' ? true : false;
1970 break;
1971 case KCFG_TRISTATE:
1972 if (value == 'y')
1973 *(enum libbpf_tristate *)ext_val = TRI_YES;
1974 else if (value == 'm')
1975 *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1976 else /* value == 'n' */
1977 *(enum libbpf_tristate *)ext_val = TRI_NO;
1978 break;
1979 case KCFG_CHAR:
1980 *(char *)ext_val = value;
1981 break;
1982 case KCFG_UNKNOWN:
1983 case KCFG_INT:
1984 case KCFG_CHAR_ARR:
1985 default:
1986 pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
1987 ext->name, value);
1988 return -EINVAL;
1989 }
1990 ext->is_set = true;
1991 return 0;
1992}
1993
1994static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1995 const char *value)
1996{
1997 size_t len;
1998
1999 if (ext->kcfg.type != KCFG_CHAR_ARR) {
2000 pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
2001 ext->name, value);
2002 return -EINVAL;
2003 }
2004
2005 len = strlen(value);
2006 if (len < 2 || value[len - 1] != '"') {
2007 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
2008 ext->name, value);
2009 return -EINVAL;
2010 }
2011
2012 /* strip quotes */
2013 len -= 2;
2014 if (len >= ext->kcfg.sz) {
2015 pr_warn("extern (kcfg) '%s': long string '%s' (%zu bytes) truncated to %d bytes\n",
2016 ext->name, value, len, ext->kcfg.sz - 1);
2017 len = ext->kcfg.sz - 1;
2018 }
2019 memcpy(ext_val, value + 1, len);
2020 ext_val[len] = '\0';
2021 ext->is_set = true;
2022 return 0;
2023}
2024
2025static int parse_u64(const char *value, __u64 *res)
2026{
2027 char *value_end;
2028 int err;
2029
2030 errno = 0;
2031 *res = strtoull(value, &value_end, 0);
2032 if (errno) {
2033 err = -errno;
2034 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
2035 return err;
2036 }
2037 if (*value_end) {
2038 pr_warn("failed to parse '%s' as integer completely\n", value);
2039 return -EINVAL;
2040 }
2041 return 0;
2042}
2043
2044static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
2045{
2046 int bit_sz = ext->kcfg.sz * 8;
2047
2048 if (ext->kcfg.sz == 8)
2049 return true;
2050
2051 /* Validate that the value stored in a u64 fits in an integer of
2052 * `ext->sz` bytes without any loss of information. If the target
2053 * integer is signed, we rely on the following limits of a Y-bit
2054 * integer type and the subsequent transformation:
2055 *
2056 * -2^(Y-1) <= X <= 2^(Y-1) - 1
2057 * 0 <= X + 2^(Y-1) <= 2^Y - 1
2058 * 0 <= X + 2^(Y-1) < 2^Y
2059 *
2060 * For unsigned target integer, check that all the (64 - Y) bits are
2061 * zero.
2062 */
2063 if (ext->kcfg.is_signed)
2064 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
2065 else
2066 return (v >> bit_sz) == 0;
2067}
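
/* Worked example (a sketch) for a signed 1-byte extern (bit_sz == 8): the
 * check is v + 128 < 256 in wrapping u64 arithmetic. v == 100 gives
 * 228 < 256 (fits); v == 200 gives 328 (rejected); v == (__u64)-100
 * wraps to 2^64 - 100 and adding 128 wraps again to 28 < 256, so -100
 * fits in a signed byte as expected.
 */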
2068
2069static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
2070 __u64 value)
2071{
2072 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
2073 ext->kcfg.type != KCFG_BOOL) {
2074 pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
2075 ext->name, (unsigned long long)value);
2076 return -EINVAL;
2077 }
2078 if (ext->kcfg.type == KCFG_BOOL && value > 1) {
2079 pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
2080 ext->name, (unsigned long long)value);
2081 return -EINVAL;
2083 }
2084 if (!is_kcfg_value_in_range(ext, value)) {
2085 pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
2086 ext->name, (unsigned long long)value, ext->kcfg.sz);
2087 return -ERANGE;
2088 }
2089 switch (ext->kcfg.sz) {
2090 case 1:
2091 *(__u8 *)ext_val = value;
2092 break;
2093 case 2:
2094 *(__u16 *)ext_val = value;
2095 break;
2096 case 4:
2097 *(__u32 *)ext_val = value;
2098 break;
2099 case 8:
2100 *(__u64 *)ext_val = value;
2101 break;
2102 default:
2103 return -EINVAL;
2104 }
2105 ext->is_set = true;
2106 return 0;
2107}
2108
2109static int bpf_object__process_kconfig_line(struct bpf_object *obj,
2110 char *buf, void *data)
2111{
2112 struct extern_desc *ext;
2113 char *sep, *value;
2114 int len, err = 0;
2115 void *ext_val;
2116 __u64 num;
2117
2118 if (!str_has_pfx(buf, "CONFIG_"))
2119 return 0;
2120
2121 sep = strchr(buf, '=');
2122 if (!sep) {
2123 pr_warn("failed to parse '%s': no separator\n", buf);
2124 return -EINVAL;
2125 }
2126
2127 /* Trim ending '\n' */
2128 len = strlen(buf);
2129 if (buf[len - 1] == '\n')
2130 buf[len - 1] = '\0';
2131 /* Split on '=' and ensure that a value is present. */
2132 *sep = '\0';
2133 if (!sep[1]) {
2134 *sep = '=';
2135 pr_warn("failed to parse '%s': no value\n", buf);
2136 return -EINVAL;
2137 }
2138
2139 ext = find_extern_by_name(obj, buf);
2140 if (!ext || ext->is_set)
2141 return 0;
2142
2143 ext_val = data + ext->kcfg.data_off;
2144 value = sep + 1;
2145
2146 switch (*value) {
2147 case 'y': case 'n': case 'm':
2148 err = set_kcfg_value_tri(ext, ext_val, *value);
2149 break;
2150 case '"':
2151 err = set_kcfg_value_str(ext, ext_val, value);
2152 break;
2153 default:
2154 /* assume integer */
2155 err = parse_u64(value, &num);
2156 if (err) {
2157 pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
2158 return err;
2159 }
2160 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
2161 pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
2162 return -EINVAL;
2163 }
2164 err = set_kcfg_value_num(ext, ext_val, num);
2165 break;
2166 }
2167 if (err)
2168 return err;
2169 pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
2170 return 0;
2171}
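
/* For instance, Kconfig lines like the following (a sketch) are matched
 * by name against externs and handled by the cases above:
 *
 *   CONFIG_BPF=y                      -> tri: bool/tristate set to true/TRI_YES
 *   CONFIG_DEFAULT_HOSTNAME="(none)"  -> str: char array, quotes stripped
 *   CONFIG_HZ=1000                    -> num: integer, range-checked
 */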
2172
2173static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
2174{
2175 char buf[PATH_MAX];
2176 struct utsname uts;
2177 int len, err = 0;
2178 gzFile file;
2179
2180 uname(&uts);
2181 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
2182 if (len < 0)
2183 return -EINVAL;
2184 else if (len >= PATH_MAX)
2185 return -ENAMETOOLONG;
2186
2187 /* gzopen also accepts uncompressed files. */
2188 file = gzopen(buf, "re");
2189 if (!file)
2190 file = gzopen("/proc/config.gz", "re");
2191
2192 if (!file) {
2193 pr_warn("failed to open system Kconfig\n");
2194 return -ENOENT;
2195 }
2196
2197 while (gzgets(file, buf, sizeof(buf))) {
2198 err = bpf_object__process_kconfig_line(obj, buf, data);
2199 if (err) {
2200 pr_warn("error parsing system Kconfig line '%s': %d\n",
2201 buf, err);
2202 goto out;
2203 }
2204 }
2205
2206out:
2207 gzclose(file);
2208 return err;
2209}
2210
2211static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
2212 const char *config, void *data)
2213{
2214 char buf[PATH_MAX];
2215 int err = 0;
2216 FILE *file;
2217
2218 file = fmemopen((void *)config, strlen(config), "r");
2219 if (!file) {
2220 err = -errno;
2221 pr_warn("failed to open in-memory Kconfig: %d\n", err);
2222 return err;
2223 }
2224
2225 while (fgets(buf, sizeof(buf), file)) {
2226 err = bpf_object__process_kconfig_line(obj, buf, data);
2227 if (err) {
2228 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
2229 buf, err);
2230 break;
2231 }
2232 }
2233
2234 fclose(file);
2235 return err;
2236}
2237
2238static int bpf_object__init_kconfig_map(struct bpf_object *obj)
2239{
2240 struct extern_desc *last_ext = NULL, *ext;
2241 size_t map_sz;
2242 int i, err;
2243
2244 for (i = 0; i < obj->nr_extern; i++) {
2245 ext = &obj->externs[i];
2246 if (ext->type == EXT_KCFG)
2247 last_ext = ext;
2248 }
2249
2250 if (!last_ext)
2251 return 0;
2252
2253 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
2254 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
2255 ".kconfig", obj->efile.symbols_shndx,
2256 NULL, map_sz);
2257 if (err)
2258 return err;
2259
2260 obj->kconfig_map_idx = obj->nr_maps - 1;
2261
2262 return 0;
2263}
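
/* BPF-side externs that land in the .kconfig map look like this (an
 * illustrative sketch; __kconfig is the SEC(".kconfig") attribute helper
 * from bpf_helpers.h):
 *
 *   extern unsigned CONFIG_HZ __kconfig;
 *   extern enum libbpf_tristate CONFIG_MODULES __kconfig __weak;
 */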
2264
2265const struct btf_type *
2266skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
2267{
2268 const struct btf_type *t = btf__type_by_id(btf, id);
2269
2270 if (res_id)
2271 *res_id = id;
2272
2273 while (btf_is_mod(t) || btf_is_typedef(t)) {
2274 if (res_id)
2275 *res_id = t->type;
2276 t = btf__type_by_id(btf, t->type);
2277 }
2278
2279 return t;
2280}
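
/* E.g., given 'typedef const volatile unsigned uval_t;', resolving a
 * uval_t type id walks TYPEDEF -> CONST -> VOLATILE and returns the
 * underlying INT type, with *res_id (if provided) set to that INT's id.
 */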
2281
2282static const struct btf_type *
2283resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2284{
2285 const struct btf_type *t;
2286
2287 t = skip_mods_and_typedefs(btf, id, NULL);
2288 if (!btf_is_ptr(t))
2289 return NULL;
2290
2291 t = skip_mods_and_typedefs(btf, t->type, res_id);
2292
2293 return btf_is_func_proto(t) ? t : NULL;
2294}
2295
2296static const char *__btf_kind_str(__u16 kind)
2297{
2298 switch (kind) {
2299 case BTF_KIND_UNKN: return "void";
2300 case BTF_KIND_INT: return "int";
2301 case BTF_KIND_PTR: return "ptr";
2302 case BTF_KIND_ARRAY: return "array";
2303 case BTF_KIND_STRUCT: return "struct";
2304 case BTF_KIND_UNION: return "union";
2305 case BTF_KIND_ENUM: return "enum";
2306 case BTF_KIND_FWD: return "fwd";
2307 case BTF_KIND_TYPEDEF: return "typedef";
2308 case BTF_KIND_VOLATILE: return "volatile";
2309 case BTF_KIND_CONST: return "const";
2310 case BTF_KIND_RESTRICT: return "restrict";
2311 case BTF_KIND_FUNC: return "func";
2312 case BTF_KIND_FUNC_PROTO: return "func_proto";
2313 case BTF_KIND_VAR: return "var";
2314 case BTF_KIND_DATASEC: return "datasec";
2315 case BTF_KIND_FLOAT: return "float";
2316 case BTF_KIND_DECL_TAG: return "decl_tag";
2317 case BTF_KIND_TYPE_TAG: return "type_tag";
2318 case BTF_KIND_ENUM64: return "enum64";
2319 default: return "unknown";
2320 }
2321}
2322
2323const char *btf_kind_str(const struct btf_type *t)
2324{
2325 return __btf_kind_str(btf_kind(t));
2326}
2327
2328/*
2329 * Fetch an integer attribute of a BTF map definition. Such attributes are
2330 * represented using a pointer to an array, in which the dimensionality of
2331 * the array encodes the specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2332 * encodes the `type => BPF_MAP_TYPE_ARRAY` key/value pair entirely within the
2333 * BTF type definition, while using only sizeof(void *) bytes in the ELF data section.
2334 */
2335static bool get_map_field_int(const char *map_name, const struct btf *btf,
2336 const struct btf_member *m, __u32 *res)
2337{
2338 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2339 const char *name = btf__name_by_offset(btf, m->name_off);
2340 const struct btf_array *arr_info;
2341 const struct btf_type *arr_t;
2342
2343 if (!btf_is_ptr(t)) {
2344 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2345 map_name, name, btf_kind_str(t));
2346 return false;
2347 }
2348
2349 arr_t = btf__type_by_id(btf, t->type);
2350 if (!arr_t) {
2351 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2352 map_name, name, t->type);
2353 return false;
2354 }
2355 if (!btf_is_array(arr_t)) {
2356 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2357 map_name, name, btf_kind_str(arr_t));
2358 return false;
2359 }
2360 arr_info = btf_array(arr_t);
2361 *res = arr_info->nelems;
2362 return true;
2363}
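
/* This is what the convenience macros from bpf_helpers.h boil down to
 * (shown as a sketch):
 *
 *   #define __uint(name, val) int (*name)[val]
 *   #define __type(name, val) typeof(val) *name
 *
 * so __uint(max_entries, 64) declares 'int (*max_entries)[64]' and
 * get_map_field_int() recovers 64 from the array's nelems.
 */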
2364
2365static bool get_map_field_long(const char *map_name, const struct btf *btf,
2366 const struct btf_member *m, __u64 *res)
2367{
2368 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2369 const char *name = btf__name_by_offset(btf, m->name_off);
2370
2371 if (btf_is_ptr(t)) {
2372 __u32 res32;
2373 bool ret;
2374
2375 ret = get_map_field_int(map_name, btf, m, &res32);
2376 if (ret)
2377 *res = (__u64)res32;
2378 return ret;
2379 }
2380
2381 if (!btf_is_enum(t) && !btf_is_enum64(t)) {
2382 pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
2383 map_name, name, btf_kind_str(t));
2384 return false;
2385 }
2386
2387 if (btf_vlen(t) != 1) {
2388 pr_warn("map '%s': attr '%s': invalid __ulong\n",
2389 map_name, name);
2390 return false;
2391 }
2392
2393 if (btf_is_enum(t)) {
2394 const struct btf_enum *e = btf_enum(t);
2395
2396 *res = e->val;
2397 } else {
2398 const struct btf_enum64 *e = btf_enum64(t);
2399
2400 *res = btf_enum64_value(e);
2401 }
2402 return true;
2403}
2404
2405static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
2406{
2407 int len;
2408
2409 len = snprintf(buf, buf_sz, "%s/%s", path, name);
2410 if (len < 0)
2411 return -EINVAL;
2412 if (len >= buf_sz)
2413 return -ENAMETOOLONG;
2414
2415 return 0;
2416}
2417
2418static int build_map_pin_path(struct bpf_map *map, const char *path)
2419{
2420 char buf[PATH_MAX];
2421 int err;
2422
2423 if (!path)
2424 path = BPF_FS_DEFAULT_PATH;
2425
2426 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
2427 if (err)
2428 return err;
2429
2430 return bpf_map__set_pin_path(map, buf);
2431}
2432
2433/* should match definition in bpf_helpers.h */
2434enum libbpf_pin_type {
2435 LIBBPF_PIN_NONE,
2436 /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2437 LIBBPF_PIN_BY_NAME,
2438};
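
/* Example BTF-defined map that opts into pinning (an illustrative sketch):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_HASH);
 *           __uint(max_entries, 1024);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *           __uint(pinning, LIBBPF_PIN_BY_NAME);
 *   } counters SEC(".maps");
 *
 * Without a pin_root_path override, build_map_pin_path() pins it at
 * /sys/fs/bpf/counters.
 */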
2439
2440int parse_btf_map_def(const char *map_name, struct btf *btf,
2441 const struct btf_type *def_t, bool strict,
2442 struct btf_map_def *map_def, struct btf_map_def *inner_def)
2443{
2444 const struct btf_type *t;
2445 const struct btf_member *m;
2446 bool is_inner = inner_def == NULL;
2447 int vlen, i;
2448
2449 vlen = btf_vlen(def_t);
2450 m = btf_members(def_t);
2451 for (i = 0; i < vlen; i++, m++) {
2452 const char *name = btf__name_by_offset(btf, m->name_off);
2453
2454 if (!name) {
2455 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2456 return -EINVAL;
2457 }
2458 if (strcmp(name, "type") == 0) {
2459 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2460 return -EINVAL;
2461 map_def->parts |= MAP_DEF_MAP_TYPE;
2462 } else if (strcmp(name, "max_entries") == 0) {
2463 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2464 return -EINVAL;
2465 map_def->parts |= MAP_DEF_MAX_ENTRIES;
2466 } else if (strcmp(name, "map_flags") == 0) {
2467 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2468 return -EINVAL;
2469 map_def->parts |= MAP_DEF_MAP_FLAGS;
2470 } else if (strcmp(name, "numa_node") == 0) {
2471 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2472 return -EINVAL;
2473 map_def->parts |= MAP_DEF_NUMA_NODE;
2474 } else if (strcmp(name, "key_size") == 0) {
2475 __u32 sz;
2476
2477 if (!get_map_field_int(map_name, btf, m, &sz))
2478 return -EINVAL;
2479 if (map_def->key_size && map_def->key_size != sz) {
2480 pr_warn("map '%s': conflicting key size %u != %u.\n",
2481 map_name, map_def->key_size, sz);
2482 return -EINVAL;
2483 }
2484 map_def->key_size = sz;
2485 map_def->parts |= MAP_DEF_KEY_SIZE;
2486 } else if (strcmp(name, "key") == 0) {
2487 __s64 sz;
2488
2489 t = btf__type_by_id(btf, m->type);
2490 if (!t) {
2491 pr_warn("map '%s': key type [%d] not found.\n",
2492 map_name, m->type);
2493 return -EINVAL;
2494 }
2495 if (!btf_is_ptr(t)) {
2496 pr_warn("map '%s': key spec is not PTR: %s.\n",
2497 map_name, btf_kind_str(t));
2498 return -EINVAL;
2499 }
2500 sz = btf__resolve_size(btf, t->type);
2501 if (sz < 0) {
2502 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2503 map_name, t->type, (ssize_t)sz);
2504 return sz;
2505 }
2506 if (map_def->key_size && map_def->key_size != sz) {
2507 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2508 map_name, map_def->key_size, (ssize_t)sz);
2509 return -EINVAL;
2510 }
2511 map_def->key_size = sz;
2512 map_def->key_type_id = t->type;
2513 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2514 } else if (strcmp(name, "value_size") == 0) {
2515 __u32 sz;
2516
2517 if (!get_map_field_int(map_name, btf, m, &sz))
2518 return -EINVAL;
2519 if (map_def->value_size && map_def->value_size != sz) {
2520 pr_warn("map '%s': conflicting value size %u != %u.\n",
2521 map_name, map_def->value_size, sz);
2522 return -EINVAL;
2523 }
2524 map_def->value_size = sz;
2525 map_def->parts |= MAP_DEF_VALUE_SIZE;
2526 } else if (strcmp(name, "value") == 0) {
2527 __s64 sz;
2528
2529 t = btf__type_by_id(btf, m->type);
2530 if (!t) {
2531 pr_warn("map '%s': value type [%d] not found.\n",
2532 map_name, m->type);
2533 return -EINVAL;
2534 }
2535 if (!btf_is_ptr(t)) {
2536 pr_warn("map '%s': value spec is not PTR: %s.\n",
2537 map_name, btf_kind_str(t));
2538 return -EINVAL;
2539 }
2540 sz = btf__resolve_size(btf, t->type);
2541 if (sz < 0) {
2542 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2543 map_name, t->type, (ssize_t)sz);
2544 return sz;
2545 }
2546 if (map_def->value_size && map_def->value_size != sz) {
2547 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2548 map_name, map_def->value_size, (ssize_t)sz);
2549 return -EINVAL;
2550 }
2551 map_def->value_size = sz;
2552 map_def->value_type_id = t->type;
2553 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2554 } else if (strcmp(name, "values") == 0) {
2556 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2557 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2558 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2559 char inner_map_name[128];
2560 int err;
2561
2562 if (is_inner) {
2563 pr_warn("map '%s': multi-level inner maps not supported.\n",
2564 map_name);
2565 return -ENOTSUP;
2566 }
2567 if (i != vlen - 1) {
2568 pr_warn("map '%s': '%s' member should be last.\n",
2569 map_name, name);
2570 return -EINVAL;
2571 }
2572 if (!is_map_in_map && !is_prog_array) {
2573 pr_warn("map '%s': should be map-in-map or prog-array.\n",
2574 map_name);
2575 return -ENOTSUP;
2576 }
2577 if (map_def->value_size && map_def->value_size != 4) {
2578 pr_warn("map '%s': conflicting value size %u != 4.\n",
2579 map_name, map_def->value_size);
2580 return -EINVAL;
2581 }
2582 map_def->value_size = 4;
2583 t = btf__type_by_id(btf, m->type);
2584 if (!t) {
2585 pr_warn("map '%s': %s type [%d] not found.\n",
2586 map_name, desc, m->type);
2587 return -EINVAL;
2588 }
2589 if (!btf_is_array(t) || btf_array(t)->nelems) {
2590 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2591 map_name, desc);
2592 return -EINVAL;
2593 }
2594 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2595 if (!btf_is_ptr(t)) {
2596 pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2597 map_name, desc, btf_kind_str(t));
2598 return -EINVAL;
2599 }
2600 t = skip_mods_and_typedefs(btf, t->type, NULL);
2601 if (is_prog_array) {
2602 if (!btf_is_func_proto(t)) {
2603 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2604 map_name, btf_kind_str(t));
2605 return -EINVAL;
2606 }
2607 continue;
2608 }
2609 if (!btf_is_struct(t)) {
2610 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2611 map_name, btf_kind_str(t));
2612 return -EINVAL;
2613 }
2614
2615 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2616 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2617 if (err)
2618 return err;
2619
2620 map_def->parts |= MAP_DEF_INNER_MAP;
2621 } else if (strcmp(name, "pinning") == 0) {
2622 __u32 val;
2623
2624 if (is_inner) {
2625 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2626 return -EINVAL;
2627 }
2628 if (!get_map_field_int(map_name, btf, m, &val))
2629 return -EINVAL;
2630 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2631 pr_warn("map '%s': invalid pinning value %u.\n",
2632 map_name, val);
2633 return -EINVAL;
2634 }
2635 map_def->pinning = val;
2636 map_def->parts |= MAP_DEF_PINNING;
2637 } else if (strcmp(name, "map_extra") == 0) {
2638 __u64 map_extra;
2639
2640 if (!get_map_field_long(map_name, btf, m, &map_extra))
2641 return -EINVAL;
2642 map_def->map_extra = map_extra;
2643 map_def->parts |= MAP_DEF_MAP_EXTRA;
2644 } else {
2645 if (strict) {
2646 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2647 return -ENOTSUP;
2648 }
2649 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2650 }
2651 }
2652
2653 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2654 pr_warn("map '%s': map type isn't specified.\n", map_name);
2655 return -EINVAL;
2656 }
2657
2658 return 0;
2659}
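
/* Example map-in-map definition exercising the "values" branch above
 * (an illustrative sketch):
 *
 *   struct inner {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(max_entries, 1);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *   };
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
 *           __uint(max_entries, 16);
 *           __type(key, __u32);
 *           __array(values, struct inner);
 *   } outer SEC(".maps");
 *
 * __array() declares a zero-sized array of pointers, which is why the
 * parser insists on !btf_array(t)->nelems and a PTR element type.
 */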
2660
2661static size_t adjust_ringbuf_sz(size_t sz)
2662{
2663 __u32 page_sz = sysconf(_SC_PAGE_SIZE);
2664 __u32 mul;
2665
2666 /* if the user forgot to set any size, make sure they see an error */
2667 if (sz == 0)
2668 return 0;
2669 /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2670 * a power-of-2 multiple of the kernel's page size. If the user
2671 * diligently satisfied these conditions, pass the size through.
2672 */
2673 if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2674 return sz;
2675
2676 /* Otherwise, find the closest (page_sz * power-of-2) product bigger
2677 * than the user-set size, satisfying both the user's size request and
2678 * the kernel's requirements, and substitute it as max_entries.
2679 */
2680 for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2681 if (mul * page_sz > sz)
2682 return mul * page_sz;
2683 }
2684
2685 /* if it's impossible to satisfy the conditions (i.e., the user size
2686 * is very close to UINT_MAX but is not a power-of-2 multiple of
2687 * page_size) then just return the original size and let the kernel reject it
2688 */
2689 return sz;
2690}
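
/* E.g., with a 4096-byte page size: 4096 stays 4096, 5000 becomes 8192
 * (4096 * 2), 16384 stays 16384, and 100000 becomes 131072 (4096 * 32).
 */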
2691
2692static bool map_is_ringbuf(const struct bpf_map *map)
2693{
2694 return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2695 map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2696}
2697
2698static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2699{
2700 map->def.type = def->map_type;
2701 map->def.key_size = def->key_size;
2702 map->def.value_size = def->value_size;
2703 map->def.max_entries = def->max_entries;
2704 map->def.map_flags = def->map_flags;
2705 map->map_extra = def->map_extra;
2706
2707 map->numa_node = def->numa_node;
2708 map->btf_key_type_id = def->key_type_id;
2709 map->btf_value_type_id = def->value_type_id;
2710
2711 /* auto-adjust BPF ringbuf map max_entries to be a power-of-2 multiple of page size */
2712 if (map_is_ringbuf(map))
2713 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2714
2715 if (def->parts & MAP_DEF_MAP_TYPE)
2716 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2717
2718 if (def->parts & MAP_DEF_KEY_TYPE)
2719 pr_debug("map '%s': found key [%u], sz = %u.\n",
2720 map->name, def->key_type_id, def->key_size);
2721 else if (def->parts & MAP_DEF_KEY_SIZE)
2722 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2723
2724 if (def->parts & MAP_DEF_VALUE_TYPE)
2725 pr_debug("map '%s': found value [%u], sz = %u.\n",
2726 map->name, def->value_type_id, def->value_size);
2727 else if (def->parts & MAP_DEF_VALUE_SIZE)
2728 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2729
2730 if (def->parts & MAP_DEF_MAX_ENTRIES)
2731 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2732 if (def->parts & MAP_DEF_MAP_FLAGS)
2733 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2734 if (def->parts & MAP_DEF_MAP_EXTRA)
2735 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2736 (unsigned long long)def->map_extra);
2737 if (def->parts & MAP_DEF_PINNING)
2738 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2739 if (def->parts & MAP_DEF_NUMA_NODE)
2740 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2741
2742 if (def->parts & MAP_DEF_INNER_MAP)
2743 pr_debug("map '%s': found inner map definition.\n", map->name);
2744}
2745
2746static const char *btf_var_linkage_str(__u32 linkage)
2747{
2748 switch (linkage) {
2749 case BTF_VAR_STATIC: return "static";
2750 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2751 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2752 default: return "unknown";
2753 }
2754}
2755
2756static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2757 const struct btf_type *sec,
2758 int var_idx, int sec_idx,
2759 const Elf_Data *data, bool strict,
2760 const char *pin_root_path)
2761{
2762 struct btf_map_def map_def = {}, inner_def = {};
2763 const struct btf_type *var, *def;
2764 const struct btf_var_secinfo *vi;
2765 const struct btf_var *var_extra;
2766 const char *map_name;
2767 struct bpf_map *map;
2768 int err;
2769
2770 vi = btf_var_secinfos(sec) + var_idx;
2771 var = btf__type_by_id(obj->btf, vi->type);
2772 var_extra = btf_var(var);
2773 map_name = btf__name_by_offset(obj->btf, var->name_off);
2774
2775 if (map_name == NULL || map_name[0] == '\0') {
2776 pr_warn("map #%d: empty name.\n", var_idx);
2777 return -EINVAL;
2778 }
2779 if ((__u64)vi->offset + vi->size > data->d_size) {
2780 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2781 return -EINVAL;
2782 }
2783 if (!btf_is_var(var)) {
2784 pr_warn("map '%s': unexpected var kind %s.\n",
2785 map_name, btf_kind_str(var));
2786 return -EINVAL;
2787 }
2788 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2789 pr_warn("map '%s': unsupported map linkage %s.\n",
2790 map_name, btf_var_linkage_str(var_extra->linkage));
2791 return -EOPNOTSUPP;
2792 }
2793
2794 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2795 if (!btf_is_struct(def)) {
2796 pr_warn("map '%s': unexpected def kind %s.\n",
2797 map_name, btf_kind_str(var));
2798 return -EINVAL;
2799 }
2800 if (def->size > vi->size) {
2801 pr_warn("map '%s': invalid def size.\n", map_name);
2802 return -EINVAL;
2803 }
2804
2805 map = bpf_object__add_map(obj);
2806 if (IS_ERR(map))
2807 return PTR_ERR(map);
2808 map->name = strdup(map_name);
2809 if (!map->name) {
2810 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2811 return -ENOMEM;
2812 }
2813 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2814 map->def.type = BPF_MAP_TYPE_UNSPEC;
2815 map->sec_idx = sec_idx;
2816 map->sec_offset = vi->offset;
2817 map->btf_var_idx = var_idx;
2818 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2819 map_name, map->sec_idx, map->sec_offset);
2820
2821 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2822 if (err)
2823 return err;
2824
2825 fill_map_from_def(map, &map_def);
2826
2827 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2828 err = build_map_pin_path(map, pin_root_path);
2829 if (err) {
2830 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2831 return err;
2832 }
2833 }
2834
2835 if (map_def.parts & MAP_DEF_INNER_MAP) {
2836 map->inner_map = calloc(1, sizeof(*map->inner_map));
2837 if (!map->inner_map)
2838 return -ENOMEM;
2839 map->inner_map->fd = create_placeholder_fd();
2840 if (map->inner_map->fd < 0)
2841 return map->inner_map->fd;
2842 map->inner_map->sec_idx = sec_idx;
2843 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2844 if (!map->inner_map->name)
2845 return -ENOMEM;
2846 sprintf(map->inner_map->name, "%s.inner", map_name);
2847
2848 fill_map_from_def(map->inner_map, &inner_def);
2849 }
2850
2851 err = map_fill_btf_type_info(obj, map);
2852 if (err)
2853 return err;
2854
2855 return 0;
2856}
2857
2858static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
2859 const char *sec_name, int sec_idx,
2860 void *data, size_t data_sz)
2861{
2862 const long page_sz = sysconf(_SC_PAGE_SIZE);
2863 size_t mmap_sz;
2864
2865 mmap_sz = bpf_map_mmap_sz(obj->arena_map);
2866 if (roundup(data_sz, page_sz) > mmap_sz) {
2867 pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
2868 sec_name, mmap_sz, data_sz);
2869 return -E2BIG;
2870 }
2871
2872 obj->arena_data = malloc(data_sz);
2873 if (!obj->arena_data)
2874 return -ENOMEM;
2875 memcpy(obj->arena_data, data, data_sz);
2876 obj->arena_data_sz = data_sz;
2877
2878 /* make bpf_map__init_value() work for ARENA maps */
2879 map->mmaped = obj->arena_data;
2880
2881 return 0;
2882}
2883
2884static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2885 const char *pin_root_path)
2886{
2887 const struct btf_type *sec = NULL;
2888 int nr_types, i, vlen, err;
2889 const struct btf_type *t;
2890 const char *name;
2891 Elf_Data *data;
2892 Elf_Scn *scn;
2893
2894 if (obj->efile.btf_maps_shndx < 0)
2895 return 0;
2896
2897 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2898 data = elf_sec_data(obj, scn);
2899 if (!scn || !data) {
2900 pr_warn("elf: failed to get %s map definitions for %s\n",
2901 MAPS_ELF_SEC, obj->path);
2902 return -EINVAL;
2903 }
2904
2905 nr_types = btf__type_cnt(obj->btf);
2906 for (i = 1; i < nr_types; i++) {
2907 t = btf__type_by_id(obj->btf, i);
2908 if (!btf_is_datasec(t))
2909 continue;
2910 name = btf__name_by_offset(obj->btf, t->name_off);
2911 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2912 sec = t;
2913 obj->efile.btf_maps_sec_btf_id = i;
2914 break;
2915 }
2916 }
2917
2918 if (!sec) {
2919 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2920 return -ENOENT;
2921 }
2922
2923 vlen = btf_vlen(sec);
2924 for (i = 0; i < vlen; i++) {
2925 err = bpf_object__init_user_btf_map(obj, sec, i,
2926 obj->efile.btf_maps_shndx,
2927 data, strict,
2928 pin_root_path);
2929 if (err)
2930 return err;
2931 }
2932
2933 for (i = 0; i < obj->nr_maps; i++) {
2934 struct bpf_map *map = &obj->maps[i];
2935
2936 if (map->def.type != BPF_MAP_TYPE_ARENA)
2937 continue;
2938
2939 if (obj->arena_map) {
2940 pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
2941 map->name, obj->arena_map->name);
2942 return -EINVAL;
2943 }
2944 obj->arena_map = map;
2945
2946 if (obj->efile.arena_data) {
2947 err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
2948 obj->efile.arena_data->d_buf,
2949 obj->efile.arena_data->d_size);
2950 if (err)
2951 return err;
2952 }
2953 }
2954 if (obj->efile.arena_data && !obj->arena_map) {
2955 pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
2956 ARENA_SEC);
2957 return -ENOENT;
2958 }
2959
2960 return 0;
2961}
2962
2963static int bpf_object__init_maps(struct bpf_object *obj,
2964 const struct bpf_object_open_opts *opts)
2965{
2966 const char *pin_root_path;
2967 bool strict;
2968 int err = 0;
2969
2970 strict = !OPTS_GET(opts, relaxed_maps, false);
2971 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2972
2973 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2974 err = err ?: bpf_object__init_global_data_maps(obj);
2975 err = err ?: bpf_object__init_kconfig_map(obj);
2976 err = err ?: bpf_object_init_struct_ops(obj);
2977
2978 return err;
2979}
2980
2981static bool section_have_execinstr(struct bpf_object *obj, int idx)
2982{
2983 Elf64_Shdr *sh;
2984
2985 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2986 if (!sh)
2987 return false;
2988
2989 return sh->sh_flags & SHF_EXECINSTR;
2990}
2991
2992static bool starts_with_qmark(const char *s)
2993{
2994 return s && s[0] == '?';
2995}
2996
2997static bool btf_needs_sanitization(struct bpf_object *obj)
2998{
2999 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3000 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3001 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3002 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
3003 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
3004 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
3005 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
3006 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
3007
3008 return !has_func || !has_datasec || !has_func_global || !has_float ||
3009 !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
3010}
3011
3012static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
3013{
3014 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3015 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3016 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3017 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
3018 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
3019 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
3020 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
3021 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
3022 int enum64_placeholder_id = 0;
3023 struct btf_type *t;
3024 int i, j, vlen;
3025
3026 for (i = 1; i < btf__type_cnt(btf); i++) {
3027 t = (struct btf_type *)btf__type_by_id(btf, i);
3028
3029 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
3030 /* replace VAR/DECL_TAG with INT */
3031 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
3032 /*
3033 * using size = 1 is the safest choice; 4 would be too
3034 * big and cause a kernel BTF validation failure if the
3035 * original variable took fewer than 4 bytes
3036 */
3037 t->size = 1;
3038 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
3039 } else if (!has_datasec && btf_is_datasec(t)) {
3040 /* replace DATASEC with STRUCT */
3041 const struct btf_var_secinfo *v = btf_var_secinfos(t);
3042 struct btf_member *m = btf_members(t);
3043 struct btf_type *vt;
3044 char *name;
3045
3046 name = (char *)btf__name_by_offset(btf, t->name_off);
3047 while (*name) {
3048 if (*name == '.' || *name == '?')
3049 *name = '_';
3050 name++;
3051 }
3052
3053 vlen = btf_vlen(t);
3054 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
3055 for (j = 0; j < vlen; j++, v++, m++) {
3056 /* order of field assignments is important */
3057 m->offset = v->offset * 8;
3058 m->type = v->type;
3059 /* preserve variable name as member name */
3060 vt = (void *)btf__type_by_id(btf, v->type);
3061 m->name_off = vt->name_off;
3062 }
3063 } else if (!has_qmark_datasec && btf_is_datasec(t) &&
3064 starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
3065 /* replace '?' prefix with '_' for DATASEC names */
3066 char *name;
3067
3068 name = (char *)btf__name_by_offset(btf, t->name_off);
3069 if (name[0] == '?')
3070 name[0] = '_';
3071 } else if (!has_func && btf_is_func_proto(t)) {
3072 /* replace FUNC_PROTO with ENUM */
3073 vlen = btf_vlen(t);
3074 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
3075 t->size = sizeof(__u32); /* kernel enforced */
3076 } else if (!has_func && btf_is_func(t)) {
3077 /* replace FUNC with TYPEDEF */
3078 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
3079 } else if (!has_func_global && btf_is_func(t)) {
3080 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
3081 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
3082 } else if (!has_float && btf_is_float(t)) {
3083 /* replace FLOAT with an equally-sized empty STRUCT;
3084 * since C compilers do not accept e.g. "float" as a
3085 * valid struct name, make it anonymous
3086 */
3087 t->name_off = 0;
3088 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
3089 } else if (!has_type_tag && btf_is_type_tag(t)) {
3090 /* replace TYPE_TAG with a CONST */
3091 t->name_off = 0;
3092 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
3093 } else if (!has_enum64 && btf_is_enum(t)) {
3094 /* clear the kflag */
3095 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
3096 } else if (!has_enum64 && btf_is_enum64(t)) {
3097 /* replace ENUM64 with a union */
3098 struct btf_member *m;
3099
3100 if (enum64_placeholder_id == 0) {
3101 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
3102 if (enum64_placeholder_id < 0)
3103 return enum64_placeholder_id;
3104
3105 t = (struct btf_type *)btf__type_by_id(btf, i);
3106 }
3107
3108 m = btf_members(t);
3109 vlen = btf_vlen(t);
3110 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
3111 for (j = 0; j < vlen; j++, m++) {
3112 m->type = enum64_placeholder_id;
3113 m->offset = 0;
3114 }
3115 }
3116 }
3117
3118 return 0;
3119}
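
/* Sanitization example (a sketch): on a kernel without DATASEC/VAR
 * support, BTF along the lines of
 *
 *   DATASEC '.data' { VAR 'g' }
 *
 * is rewritten in the cloned BTF as
 *
 *   STRUCT '_data' { member 'g' }
 *
 * with the VAR itself downgraded to a 1-byte INT placeholder.
 */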
3120
3121static bool libbpf_needs_btf(const struct bpf_object *obj)
3122{
3123 return obj->efile.btf_maps_shndx >= 0 ||
3124 obj->efile.has_st_ops ||
3125 obj->nr_extern > 0;
3126}
3127
3128static bool kernel_needs_btf(const struct bpf_object *obj)
3129{
3130 return obj->efile.has_st_ops;
3131}
3132
3133static int bpf_object__init_btf(struct bpf_object *obj,
3134 Elf_Data *btf_data,
3135 Elf_Data *btf_ext_data)
3136{
3137 int err = -ENOENT;
3138
3139 if (btf_data) {
3140 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
3141 err = libbpf_get_error(obj->btf);
3142 if (err) {
3143 obj->btf = NULL;
3144 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
3145 goto out;
3146 }
3147 /* enforce 8-byte pointers for BPF-targeted BTFs */
3148 btf__set_pointer_size(obj->btf, 8);
3149 }
3150 if (btf_ext_data) {
3151 struct btf_ext_info *ext_segs[3];
3152 int seg_num, sec_num;
3153
3154 if (!obj->btf) {
3155 pr_debug("Ignoring ELF section %s because the ELF section it depends on, %s, was not found.\n",
3156 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
3157 goto out;
3158 }
3159 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
3160 err = libbpf_get_error(obj->btf_ext);
3161 if (err) {
3162 pr_warn("Error loading ELF section %s: %d. Ignoring it and continuing.\n",
3163 BTF_EXT_ELF_SEC, err);
3164 obj->btf_ext = NULL;
3165 goto out;
3166 }
3167
3168 /* setup .BTF.ext to ELF section mapping */
3169 ext_segs[0] = &obj->btf_ext->func_info;
3170 ext_segs[1] = &obj->btf_ext->line_info;
3171 ext_segs[2] = &obj->btf_ext->core_relo_info;
3172 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
3173 struct btf_ext_info *seg = ext_segs[seg_num];
3174 const struct btf_ext_info_sec *sec;
3175 const char *sec_name;
3176 Elf_Scn *scn;
3177
3178 if (seg->sec_cnt == 0)
3179 continue;
3180
3181 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
3182 if (!seg->sec_idxs) {
3183 err = -ENOMEM;
3184 goto out;
3185 }
3186
3187 sec_num = 0;
3188 for_each_btf_ext_sec(seg, sec) {
3189 /* preventively increment index to avoid doing
3190 * this before every continue below
3191 */
3192 sec_num++;
3193
3194 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3195 if (str_is_empty(sec_name))
3196 continue;
3197 scn = elf_sec_by_name(obj, sec_name);
3198 if (!scn)
3199 continue;
3200
3201 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3202 }
3203 }
3204 }
3205out:
3206 if (err && libbpf_needs_btf(obj)) {
3207 pr_warn("BTF is required, but is missing or corrupted.\n");
3208 return err;
3209 }
3210 return 0;
3211}
3212
3213static int compare_vsi_off(const void *_a, const void *_b)
3214{
3215 const struct btf_var_secinfo *a = _a;
3216 const struct btf_var_secinfo *b = _b;
3217
3218 return a->offset - b->offset;
3219}
3220
3221static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3222 struct btf_type *t)
3223{
3224 __u32 size = 0, i, vars = btf_vlen(t);
3225 const char *sec_name = btf__name_by_offset(btf, t->name_off);
3226 struct btf_var_secinfo *vsi;
3227 bool fixup_offsets = false;
3228 int err;
3229
3230 if (!sec_name) {
3231 pr_debug("No name found in string section for DATASEC kind.\n");
3232 return -ENOENT;
3233 }
3234
3235 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3236 * variable offsets set at the previous step. Further, not every
3237 * extern BTF VAR has a corresponding ELF symbol preserved, so we skip
3238 * all fixups altogether for such sections and go straight to sorting
3239 * VARs within their DATASEC.
3240 */
3241 if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
3242 goto sort_vars;
3243
3244 /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
3245 * fix this up. But the BPF static linker already fixes this up and
3246 * fills in all the sizes and offsets during static linking, so this
3247 * step has to be optional. The STV_HIDDEN handling, however, is
3248 * non-optional for any non-extern DATASEC, so the variable fixup loop
3249 * below handles both jobs at the same time, paying the cost of BTF
3250 * VAR <-> ELF symbol matching just once.
3251 */
3252 if (t->size == 0) {
3253 err = find_elf_sec_sz(obj, sec_name, &size);
3254 if (err || !size) {
3255 pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
3256 sec_name, size, err);
3257 return -ENOENT;
3258 }
3259
3260 t->size = size;
3261 fixup_offsets = true;
3262 }
3263
3264 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
3265 const struct btf_type *t_var;
3266 struct btf_var *var;
3267 const char *var_name;
3268 Elf64_Sym *sym;
3269
3270 t_var = btf__type_by_id(btf, vsi->type);
3271 if (!t_var || !btf_is_var(t_var)) {
3272 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
3273 return -EINVAL;
3274 }
3275
3276 var = btf_var(t_var);
3277 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
3278 continue;
3279
3280 var_name = btf__name_by_offset(btf, t_var->name_off);
3281 if (!var_name) {
3282 pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
3283 sec_name, i);
3284 return -ENOENT;
3285 }
3286
3287 sym = find_elf_var_sym(obj, var_name);
3288 if (IS_ERR(sym)) {
3289 pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
3290 sec_name, var_name);
3291 return -ENOENT;
3292 }
3293
3294 if (fixup_offsets)
3295 vsi->offset = sym->st_value;
3296
3297 /* if variable is a global/weak symbol, but has restricted
3298 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
3299 * as static. This follows similar logic for functions (BPF
3300 * subprogs) and influences libbpf's further decisions about
3301 * whether to make global data BPF array maps as
3302 * BPF_F_MMAPABLE.
3303 */
3304 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
3305 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3306 var->linkage = BTF_VAR_STATIC;
3307 }
3308
3309sort_vars:
3310 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
3311 return 0;
3312}
3313
3314static int bpf_object_fixup_btf(struct bpf_object *obj)
3315{
3316 int i, n, err = 0;
3317
3318 if (!obj->btf)
3319 return 0;
3320
3321 n = btf__type_cnt(obj->btf);
3322 for (i = 1; i < n; i++) {
3323 struct btf_type *t = btf_type_by_id(obj->btf, i);
3324
3325 /* Loader needs to fix up some of the things compiler
3326 * couldn't get its hands on while emitting BTF. This
3327 * is section size and global variable offset. We use
3328 * the info from the ELF itself for this purpose.
3329 */
3330 if (btf_is_datasec(t)) {
3331 err = btf_fixup_datasec(obj, obj->btf, t);
3332 if (err)
3333 return err;
3334 }
3335 }
3336
3337 return 0;
3338}
3339
3340static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
3341{
3342 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3343 prog->type == BPF_PROG_TYPE_LSM)
3344 return true;
3345
3346 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
3347 * also need vmlinux BTF
3348 */
3349 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3350 return true;
3351
3352 return false;
3353}
3354
3355static bool map_needs_vmlinux_btf(struct bpf_map *map)
3356{
3357 return bpf_map__is_struct_ops(map);
3358}
3359
3360static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
3361{
3362 struct bpf_program *prog;
3363 struct bpf_map *map;
3364 int i;
3365
3366 /* CO-RE relocations need kernel BTF, but only when btf_custom_path
3367 * is not specified
3368 */
3369 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
3370 return true;
3371
3372 /* Support for typed ksyms needs kernel BTF */
3373 for (i = 0; i < obj->nr_extern; i++) {
3374 const struct extern_desc *ext;
3375
3376 ext = &obj->externs[i];
3377 if (ext->type == EXT_KSYM && ext->ksym.type_id)
3378 return true;
3379 }
3380
3381 bpf_object__for_each_program(prog, obj) {
3382 if (!prog->autoload)
3383 continue;
3384 if (prog_needs_vmlinux_btf(prog))
3385 return true;
3386 }
3387
3388 bpf_object__for_each_map(map, obj) {
3389 if (map_needs_vmlinux_btf(map))
3390 return true;
3391 }
3392
3393 return false;
3394}
3395
3396static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3397{
3398 int err;
3399
3400 /* btf_vmlinux could be loaded earlier */
3401 if (obj->btf_vmlinux || obj->gen_loader)
3402 return 0;
3403
3404 if (!force && !obj_needs_vmlinux_btf(obj))
3405 return 0;
3406
3407 obj->btf_vmlinux = btf__load_vmlinux_btf();
3408 err = libbpf_get_error(obj->btf_vmlinux);
3409 if (err) {
3410 pr_warn("Error loading vmlinux BTF: %d\n", err);
3411 obj->btf_vmlinux = NULL;
3412 return err;
3413 }
3414 return 0;
3415}
3416
3417static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3418{
3419 struct btf *kern_btf = obj->btf;
3420 bool btf_mandatory, sanitize;
3421 int i, err = 0;
3422
3423 if (!obj->btf)
3424 return 0;
3425
3426 if (!kernel_supports(obj, FEAT_BTF)) {
3427 if (kernel_needs_btf(obj)) {
3428 err = -EOPNOTSUPP;
3429 goto report;
3430 }
3431 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
3432 return 0;
3433 }
3434
3435 /* Even though some subprogs are global/weak, the user might prefer the
3436 * more permissive BPF verification process that the BPF verifier
3437 * performs for static functions, taking into account more context from
3438 * the caller functions. In such a case, they need to mark such subprogs
3439 * with __attribute__((visibility("hidden"))) and libbpf will adjust the
3440 * corresponding FUNC BTF type to be marked as static, triggering the
3441 * more involved BPF verification process.
3442 */
3443 for (i = 0; i < obj->nr_programs; i++) {
3444 struct bpf_program *prog = &obj->programs[i];
3445 struct btf_type *t;
3446 const char *name;
3447 int j, n;
3448
3449 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3450 continue;
3451
3452 n = btf__type_cnt(obj->btf);
3453 for (j = 1; j < n; j++) {
3454 t = btf_type_by_id(obj->btf, j);
3455 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3456 continue;
3457
3458 name = btf__str_by_offset(obj->btf, t->name_off);
3459 if (strcmp(name, prog->name) != 0)
3460 continue;
3461
3462 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3463 break;
3464 }
3465 }
3466
3467 sanitize = btf_needs_sanitization(obj);
3468 if (sanitize) {
3469 const void *raw_data;
3470 __u32 sz;
3471
3472 /* clone BTF to sanitize a copy and leave the original intact */
3473 raw_data = btf__raw_data(obj->btf, &sz);
3474 kern_btf = btf__new(raw_data, sz);
3475 err = libbpf_get_error(kern_btf);
3476 if (err)
3477 return err;
3478
3479 /* enforce 8-byte pointers for BPF-targeted BTFs */
3480 btf__set_pointer_size(obj->btf, 8);
3481 err = bpf_object__sanitize_btf(obj, kern_btf);
3482 if (err)
3483 return err;
3484 }
3485
3486 if (obj->gen_loader) {
3487 __u32 raw_size = 0;
3488 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3489
3490 if (!raw_data)
3491 return -ENOMEM;
3492 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3493 /* Pretend to have a valid FD to pass various fd >= 0 checks.
3494 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3495 */
3496 btf__set_fd(kern_btf, 0);
3497 } else {
3498 /* currently BPF_BTF_LOAD only supports log_level 1 */
3499 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3500 obj->log_level ? 1 : 0, obj->token_fd);
3501 }
3502 if (sanitize) {
3503 if (!err) {
3504 /* move fd to libbpf's BTF */
3505 btf__set_fd(obj->btf, btf__fd(kern_btf));
3506 btf__set_fd(kern_btf, -1);
3507 }
3508 btf__free(kern_btf);
3509 }
3510report:
3511 if (err) {
3512 btf_mandatory = kernel_needs_btf(obj);
3513 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3514 btf_mandatory ? "BTF is mandatory, can't proceed."
3515 : "BTF is optional, ignoring.");
3516 if (!btf_mandatory)
3517 err = 0;
3518 }
3519 return err;
3520}
3521
3522static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3523{
3524 const char *name;
3525
3526 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3527 if (!name) {
3528 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3529 off, obj->path, elf_errmsg(-1));
3530 return NULL;
3531 }
3532
3533 return name;
3534}
3535
3536static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3537{
3538 const char *name;
3539
3540 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3541 if (!name) {
3542 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3543 off, obj->path, elf_errmsg(-1));
3544 return NULL;
3545 }
3546
3547 return name;
3548}
3549
3550static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3551{
3552 Elf_Scn *scn;
3553
3554 scn = elf_getscn(obj->efile.elf, idx);
3555 if (!scn) {
3556 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3557 idx, obj->path, elf_errmsg(-1));
3558 return NULL;
3559 }
3560 return scn;
3561}
3562
3563static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3564{
3565 Elf_Scn *scn = NULL;
3566 Elf *elf = obj->efile.elf;
3567 const char *sec_name;
3568
3569 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3570 sec_name = elf_sec_name(obj, scn);
3571 if (!sec_name)
3572 return NULL;
3573
3574 if (strcmp(sec_name, name) != 0)
3575 continue;
3576
3577 return scn;
3578 }
3579 return NULL;
3580}
3581
3582static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3583{
3584 Elf64_Shdr *shdr;
3585
3586 if (!scn)
3587 return NULL;
3588
3589 shdr = elf64_getshdr(scn);
3590 if (!shdr) {
3591 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3592 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3593 return NULL;
3594 }
3595
3596 return shdr;
3597}
3598
3599static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3600{
3601 const char *name;
3602 Elf64_Shdr *sh;
3603
3604 if (!scn)
3605 return NULL;
3606
3607 sh = elf_sec_hdr(obj, scn);
3608 if (!sh)
3609 return NULL;
3610
3611 name = elf_sec_str(obj, sh->sh_name);
3612 if (!name) {
3613 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3614 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3615 return NULL;
3616 }
3617
3618 return name;
3619}
3620
3621static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3622{
3623 Elf_Data *data;
3624
3625 if (!scn)
3626 return NULL;
3627
3628 data = elf_getdata(scn, 0);
3629 if (!data) {
3630 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3631 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3632 obj->path, elf_errmsg(-1));
3633 return NULL;
3634 }
3635
3636 return data;
3637}
3638
3639static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3640{
3641 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3642 return NULL;
3643
3644 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3645}
3646
3647static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3648{
3649 if (idx >= data->d_size / sizeof(Elf64_Rel))
3650 return NULL;
3651
3652 return (Elf64_Rel *)data->d_buf + idx;
3653}
3654
3655static bool is_sec_name_dwarf(const char *name)
3656{
3657 /* approximation, but the actual list is too long */
3658 return str_has_pfx(name, ".debug_");
3659}
3660
3661static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3662{
3663 /* no special handling of .strtab */
3664 if (hdr->sh_type == SHT_STRTAB)
3665 return true;
3666
3667 /* ignore .llvm_addrsig section as well */
3668 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3669 return true;
3670
3671 /* no subprograms will lead to an empty .text section, ignore it */
3672 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3673 strcmp(name, ".text") == 0)
3674 return true;
3675
3676 /* DWARF sections */
3677 if (is_sec_name_dwarf(name))
3678 return true;
3679
3680 if (str_has_pfx(name, ".rel")) {
3681 name += sizeof(".rel") - 1;
3682 /* DWARF section relocations */
3683 if (is_sec_name_dwarf(name))
3684 return true;
3685
3686 /* .BTF and .BTF.ext don't need relocations */
3687 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3688 strcmp(name, BTF_EXT_ELF_SEC) == 0)
3689 return true;
3690 }
3691
3692 return false;
3693}
3694
3695static int cmp_progs(const void *_a, const void *_b)
3696{
3697 const struct bpf_program *a = _a;
3698 const struct bpf_program *b = _b;
3699
3700 if (a->sec_idx != b->sec_idx)
3701 return a->sec_idx < b->sec_idx ? -1 : 1;
3702
3703 /* sec_insn_off can't be the same within the section */
3704 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3705}
3706
3707static int bpf_object__elf_collect(struct bpf_object *obj)
3708{
3709 struct elf_sec_desc *sec_desc;
3710 Elf *elf = obj->efile.elf;
3711 Elf_Data *btf_ext_data = NULL;
3712 Elf_Data *btf_data = NULL;
3713 int idx = 0, err = 0;
3714 const char *name;
3715 Elf_Data *data;
3716 Elf_Scn *scn;
3717 Elf64_Shdr *sh;
3718
3719 /* ELF section indices are 0-based, but sec #0 is a special "invalid"
3720 * section. Since the section count retrieved by elf_getshdrnum() does
3721 * include sec #0, it is already the necessary size of an array to keep
3722 * all the sections.
3723 */
3724 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3725 pr_warn("elf: failed to get the number of sections for %s: %s\n",
3726 obj->path, elf_errmsg(-1));
3727 return -LIBBPF_ERRNO__FORMAT;
3728 }
3729 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3730 if (!obj->efile.secs)
3731 return -ENOMEM;
3732
3733 /* a bunch of ELF parsing functionality depends on processing symbols,
3734 * so do the first pass and find the symbol table
3735 */
3736 scn = NULL;
3737 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3738 sh = elf_sec_hdr(obj, scn);
3739 if (!sh)
3740 return -LIBBPF_ERRNO__FORMAT;
3741
3742 if (sh->sh_type == SHT_SYMTAB) {
3743 if (obj->efile.symbols) {
3744 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3745 return -LIBBPF_ERRNO__FORMAT;
3746 }
3747
3748 data = elf_sec_data(obj, scn);
3749 if (!data)
3750 return -LIBBPF_ERRNO__FORMAT;
3751
3752 idx = elf_ndxscn(scn);
3753
3754 obj->efile.symbols = data;
3755 obj->efile.symbols_shndx = idx;
3756 obj->efile.strtabidx = sh->sh_link;
3757 }
3758 }
3759
3760 if (!obj->efile.symbols) {
3761 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3762 obj->path);
3763 return -ENOENT;
3764 }
3765
3766 scn = NULL;
3767 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3768 idx = elf_ndxscn(scn);
3769 sec_desc = &obj->efile.secs[idx];
3770
3771 sh = elf_sec_hdr(obj, scn);
3772 if (!sh)
3773 return -LIBBPF_ERRNO__FORMAT;
3774
3775 name = elf_sec_str(obj, sh->sh_name);
3776 if (!name)
3777 return -LIBBPF_ERRNO__FORMAT;
3778
3779 if (ignore_elf_section(sh, name))
3780 continue;
3781
3782 data = elf_sec_data(obj, scn);
3783 if (!data)
3784 return -LIBBPF_ERRNO__FORMAT;
3785
3786 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3787 idx, name, (unsigned long)data->d_size,
3788 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3789 (int)sh->sh_type);
3790
3791 if (strcmp(name, "license") == 0) {
3792 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3793 if (err)
3794 return err;
3795 } else if (strcmp(name, "version") == 0) {
3796 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3797 if (err)
3798 return err;
3799 } else if (strcmp(name, "maps") == 0) {
3800 pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3801 return -ENOTSUP;
3802 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3803 obj->efile.btf_maps_shndx = idx;
3804 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3805 if (sh->sh_type != SHT_PROGBITS)
3806 return -LIBBPF_ERRNO__FORMAT;
3807 btf_data = data;
3808 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3809 if (sh->sh_type != SHT_PROGBITS)
3810 return -LIBBPF_ERRNO__FORMAT;
3811 btf_ext_data = data;
3812 } else if (sh->sh_type == SHT_SYMTAB) {
3813 /* already processed during the first pass above */
3814 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3815 if (sh->sh_flags & SHF_EXECINSTR) {
3816 if (strcmp(name, ".text") == 0)
3817 obj->efile.text_shndx = idx;
3818 err = bpf_object__add_programs(obj, data, name, idx);
3819 if (err)
3820 return err;
3821 } else if (strcmp(name, DATA_SEC) == 0 ||
3822 str_has_pfx(name, DATA_SEC ".")) {
3823 sec_desc->sec_type = SEC_DATA;
3824 sec_desc->shdr = sh;
3825 sec_desc->data = data;
3826 } else if (strcmp(name, RODATA_SEC) == 0 ||
3827 str_has_pfx(name, RODATA_SEC ".")) {
3828 sec_desc->sec_type = SEC_RODATA;
3829 sec_desc->shdr = sh;
3830 sec_desc->data = data;
3831 } else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
3832 strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
3833 strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
3834 strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
3835 sec_desc->sec_type = SEC_ST_OPS;
3836 sec_desc->shdr = sh;
3837 sec_desc->data = data;
3838 obj->efile.has_st_ops = true;
3839 } else if (strcmp(name, ARENA_SEC) == 0) {
3840 obj->efile.arena_data = data;
3841 obj->efile.arena_data_shndx = idx;
3842 } else {
3843 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3844 idx, name);
3845 }
3846 } else if (sh->sh_type == SHT_REL) {
3847 int targ_sec_idx = sh->sh_info; /* points to other section */
3848
3849 if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3850 targ_sec_idx >= obj->efile.sec_cnt)
3851 return -LIBBPF_ERRNO__FORMAT;
3852
3853 /* Only do relo for section with exec instructions */
3854 if (!section_have_execinstr(obj, targ_sec_idx) &&
3855 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3856 strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
3857 strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
3858 strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
3859 strcmp(name, ".rel" MAPS_ELF_SEC)) {
3860 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3861 idx, name, targ_sec_idx,
3862 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3863 continue;
3864 }
3865
3866 sec_desc->sec_type = SEC_RELO;
3867 sec_desc->shdr = sh;
3868 sec_desc->data = data;
3869 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3870 str_has_pfx(name, BSS_SEC "."))) {
3871 sec_desc->sec_type = SEC_BSS;
3872 sec_desc->shdr = sh;
3873 sec_desc->data = data;
3874 } else {
3875 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3876 (size_t)sh->sh_size);
3877 }
3878 }
3879
3880 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3881 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3882 return -LIBBPF_ERRNO__FORMAT;
3883 }
3884
3885	/* sort BPF programs by section index and in-section instruction offset
3886 * for faster search
3887 */
3888 if (obj->nr_programs)
3889 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3890
3891 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3892}
3893
3894static bool sym_is_extern(const Elf64_Sym *sym)
3895{
3896 int bind = ELF64_ST_BIND(sym->st_info);
3897 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3898 return sym->st_shndx == SHN_UNDEF &&
3899 (bind == STB_GLOBAL || bind == STB_WEAK) &&
3900 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
3901}
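
/* A sketch of BPF-side declarations that produce such symbols (the
 * __kconfig/__ksym/__weak macros come from bpf_helpers.h; the names are
 * illustrative):
 *
 *	extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 *	extern const int bpf_prog_active __ksym __weak;
 *
 * Both compile down to STT_NOTYPE symbols with STB_GLOBAL (or STB_WEAK)
 * binding in section SHN_UNDEF, which is exactly what this predicate
 * matches.
 */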
3902
3903static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3904{
3905 int bind = ELF64_ST_BIND(sym->st_info);
3906 int type = ELF64_ST_TYPE(sym->st_info);
3907
3908 /* in .text section */
3909 if (sym->st_shndx != text_shndx)
3910 return false;
3911
3912 /* local function */
3913 if (bind == STB_LOCAL && type == STT_SECTION)
3914 return true;
3915
3916 /* global function */
3917 return bind == STB_GLOBAL && type == STT_FUNC;
3918}
3919
3920static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3921{
3922 const struct btf_type *t;
3923 const char *tname;
3924 int i, n;
3925
3926 if (!btf)
3927 return -ESRCH;
3928
3929 n = btf__type_cnt(btf);
3930 for (i = 1; i < n; i++) {
3931 t = btf__type_by_id(btf, i);
3932
3933 if (!btf_is_var(t) && !btf_is_func(t))
3934 continue;
3935
3936 tname = btf__name_by_offset(btf, t->name_off);
3937 if (strcmp(tname, ext_name))
3938 continue;
3939
3940 if (btf_is_var(t) &&
3941 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3942 return -EINVAL;
3943
3944 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3945 return -EINVAL;
3946
3947 return i;
3948 }
3949
3950 return -ENOENT;
3951}
3952
3953static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3954 const struct btf_var_secinfo *vs;
3955 const struct btf_type *t;
3956 int i, j, n;
3957
3958 if (!btf)
3959 return -ESRCH;
3960
3961 n = btf__type_cnt(btf);
3962 for (i = 1; i < n; i++) {
3963 t = btf__type_by_id(btf, i);
3964
3965 if (!btf_is_datasec(t))
3966 continue;
3967
3968 vs = btf_var_secinfos(t);
3969 for (j = 0; j < btf_vlen(t); j++, vs++) {
3970 if (vs->type == ext_btf_id)
3971 return i;
3972 }
3973 }
3974
3975 return -ENOENT;
3976}
3977
3978static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3979 bool *is_signed)
3980{
3981 const struct btf_type *t;
3982 const char *name;
3983
3984 t = skip_mods_and_typedefs(btf, id, NULL);
3985 name = btf__name_by_offset(btf, t->name_off);
3986
3987 if (is_signed)
3988 *is_signed = false;
3989 switch (btf_kind(t)) {
3990 case BTF_KIND_INT: {
3991 int enc = btf_int_encoding(t);
3992
3993 if (enc & BTF_INT_BOOL)
3994 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3995 if (is_signed)
3996 *is_signed = enc & BTF_INT_SIGNED;
3997 if (t->size == 1)
3998 return KCFG_CHAR;
3999 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
4000 return KCFG_UNKNOWN;
4001 return KCFG_INT;
4002 }
4003 case BTF_KIND_ENUM:
4004 if (t->size != 4)
4005 return KCFG_UNKNOWN;
4006 if (strcmp(name, "libbpf_tristate"))
4007 return KCFG_UNKNOWN;
4008 return KCFG_TRISTATE;
4009 case BTF_KIND_ENUM64:
4010 if (strcmp(name, "libbpf_tristate"))
4011 return KCFG_UNKNOWN;
4012 return KCFG_TRISTATE;
4013 case BTF_KIND_ARRAY:
4014 if (btf_array(t)->nelems == 0)
4015 return KCFG_UNKNOWN;
4016 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
4017 return KCFG_UNKNOWN;
4018 return KCFG_CHAR_ARR;
4019 default:
4020 return KCFG_UNKNOWN;
4021 }
4022}
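
/* Illustrative extern (kcfg) declarations and the kcfg type each one
 * resolves to (a sketch; the CONFIG_* names are just examples):
 *
 *	extern int CONFIG_HZ __kconfig;				-> KCFG_INT
 *	extern bool CONFIG_BPF_JIT_ALWAYS_ON __kconfig __weak;	-> KCFG_BOOL
 *	extern enum libbpf_tristate CONFIG_BPF_LSM __kconfig __weak;
 *								-> KCFG_TRISTATE
 *	extern char CONFIG_LOCALVERSION[64] __kconfig __weak;	-> KCFG_CHAR_ARR
 */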
4023
4024static int cmp_externs(const void *_a, const void *_b)
4025{
4026 const struct extern_desc *a = _a;
4027 const struct extern_desc *b = _b;
4028
4029 if (a->type != b->type)
4030 return a->type < b->type ? -1 : 1;
4031
4032 if (a->type == EXT_KCFG) {
4033 /* descending order by alignment requirements */
4034 if (a->kcfg.align != b->kcfg.align)
4035 return a->kcfg.align > b->kcfg.align ? -1 : 1;
4036 /* ascending order by size, within same alignment class */
4037 if (a->kcfg.sz != b->kcfg.sz)
4038 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
4039 }
4040
4041 /* resolve ties by name */
4042 return strcmp(a->name, b->name);
4043}
4044
4045static int find_int_btf_id(const struct btf *btf)
4046{
4047 const struct btf_type *t;
4048 int i, n;
4049
4050 n = btf__type_cnt(btf);
4051 for (i = 1; i < n; i++) {
4052 t = btf__type_by_id(btf, i);
4053
4054 if (btf_is_int(t) && btf_int_bits(t) == 32)
4055 return i;
4056 }
4057
4058 return 0;
4059}
4060
4061static int add_dummy_ksym_var(struct btf *btf)
4062{
4063 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
4064 const struct btf_var_secinfo *vs;
4065 const struct btf_type *sec;
4066
4067 if (!btf)
4068 return 0;
4069
4070 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
4071 BTF_KIND_DATASEC);
4072 if (sec_btf_id < 0)
4073 return 0;
4074
4075 sec = btf__type_by_id(btf, sec_btf_id);
4076 vs = btf_var_secinfos(sec);
4077 for (i = 0; i < btf_vlen(sec); i++, vs++) {
4078 const struct btf_type *vt;
4079
4080 vt = btf__type_by_id(btf, vs->type);
4081 if (btf_is_func(vt))
4082 break;
4083 }
4084
4085 /* No func in ksyms sec. No need to add dummy var. */
4086 if (i == btf_vlen(sec))
4087 return 0;
4088
4089 int_btf_id = find_int_btf_id(btf);
4090 dummy_var_btf_id = btf__add_var(btf,
4091 "dummy_ksym",
4092 BTF_VAR_GLOBAL_ALLOCATED,
4093 int_btf_id);
4094 if (dummy_var_btf_id < 0)
4095 pr_warn("cannot create a dummy_ksym var\n");
4096
4097 return dummy_var_btf_id;
4098}
4099
4100static int bpf_object__collect_externs(struct bpf_object *obj)
4101{
4102 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
4103 const struct btf_type *t;
4104 struct extern_desc *ext;
4105 int i, n, off, dummy_var_btf_id;
4106 const char *ext_name, *sec_name;
4107 size_t ext_essent_len;
4108 Elf_Scn *scn;
4109 Elf64_Shdr *sh;
4110
4111 if (!obj->efile.symbols)
4112 return 0;
4113
4114 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
4115 sh = elf_sec_hdr(obj, scn);
4116 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
4117 return -LIBBPF_ERRNO__FORMAT;
4118
4119 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4120 if (dummy_var_btf_id < 0)
4121 return dummy_var_btf_id;
4122
4123 n = sh->sh_size / sh->sh_entsize;
4124 pr_debug("looking for externs among %d symbols...\n", n);
4125
4126 for (i = 0; i < n; i++) {
4127 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
4128
4129 if (!sym)
4130 return -LIBBPF_ERRNO__FORMAT;
4131 if (!sym_is_extern(sym))
4132 continue;
4133 ext_name = elf_sym_str(obj, sym->st_name);
4134 if (!ext_name || !ext_name[0])
4135 continue;
4136
4137 ext = obj->externs;
4138 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
4139 if (!ext)
4140 return -ENOMEM;
4141 obj->externs = ext;
4142 ext = &ext[obj->nr_extern];
4143 memset(ext, 0, sizeof(*ext));
4144 obj->nr_extern++;
4145
4146 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4147 if (ext->btf_id <= 0) {
4148 pr_warn("failed to find BTF for extern '%s': %d\n",
4149 ext_name, ext->btf_id);
4150 return ext->btf_id;
4151 }
4152 t = btf__type_by_id(obj->btf, ext->btf_id);
4153 ext->name = btf__name_by_offset(obj->btf, t->name_off);
4154 ext->sym_idx = i;
4155 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
4156
4157 ext_essent_len = bpf_core_essential_name_len(ext->name);
4158 ext->essent_name = NULL;
4159 if (ext_essent_len != strlen(ext->name)) {
4160 ext->essent_name = strndup(ext->name, ext_essent_len);
4161 if (!ext->essent_name)
4162 return -ENOMEM;
4163 }
4164
4165 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4166 if (ext->sec_btf_id <= 0) {
4167 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
4168 ext_name, ext->btf_id, ext->sec_btf_id);
4169 return ext->sec_btf_id;
4170 }
4171 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4172 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4173
4174 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
4175 if (btf_is_func(t)) {
4176 pr_warn("extern function %s is unsupported under %s section\n",
4177 ext->name, KCONFIG_SEC);
4178 return -ENOTSUP;
4179 }
4180 kcfg_sec = sec;
4181 ext->type = EXT_KCFG;
4182 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4183 if (ext->kcfg.sz <= 0) {
4184 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
4185 ext_name, ext->kcfg.sz);
4186 return ext->kcfg.sz;
4187 }
4188 ext->kcfg.align = btf__align_of(obj->btf, t->type);
4189 if (ext->kcfg.align <= 0) {
4190 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
4191 ext_name, ext->kcfg.align);
4192 return -EINVAL;
4193 }
4194 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
4195 &ext->kcfg.is_signed);
4196 if (ext->kcfg.type == KCFG_UNKNOWN) {
4197 pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
4198 return -ENOTSUP;
4199 }
4200 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
4201 ksym_sec = sec;
4202 ext->type = EXT_KSYM;
4203 skip_mods_and_typedefs(obj->btf, t->type,
4204 &ext->ksym.type_id);
4205 } else {
4206 pr_warn("unrecognized extern section '%s'\n", sec_name);
4207 return -ENOTSUP;
4208 }
4209 }
4210 pr_debug("collected %d externs total\n", obj->nr_extern);
4211
4212 if (!obj->nr_extern)
4213 return 0;
4214
4215 /* sort externs by type, for kcfg ones also by (align, size, name) */
4216 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
4217
4218 /* for .ksyms section, we need to turn all externs into allocated
4219 * variables in BTF to pass kernel verification; we do this by
4220	 * pretending that each extern is a 4-byte int variable
4221 */
4222 if (ksym_sec) {
4223 /* find existing 4-byte integer type in BTF to use for fake
4224 * extern variables in DATASEC
4225 */
4226 int int_btf_id = find_int_btf_id(obj->btf);
4227		/* For an extern function, the dummy_var added earlier
4228		 * will be used to replace vs->type, and its
4229		 * name string will be reused to fill in
4230		 * any missing parameter names.
4231		 */
4232 const struct btf_type *dummy_var;
4233
4234 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
4235 for (i = 0; i < obj->nr_extern; i++) {
4236 ext = &obj->externs[i];
4237 if (ext->type != EXT_KSYM)
4238 continue;
4239 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
4240 i, ext->sym_idx, ext->name);
4241 }
4242
4243 sec = ksym_sec;
4244 n = btf_vlen(sec);
4245 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
4246 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4247 struct btf_type *vt;
4248
4249 vt = (void *)btf__type_by_id(obj->btf, vs->type);
4250 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4251 ext = find_extern_by_name(obj, ext_name);
4252 if (!ext) {
4253 pr_warn("failed to find extern definition for BTF %s '%s'\n",
4254 btf_kind_str(vt), ext_name);
4255 return -ESRCH;
4256 }
4257 if (btf_is_func(vt)) {
4258 const struct btf_type *func_proto;
4259 struct btf_param *param;
4260 int j;
4261
4262 func_proto = btf__type_by_id(obj->btf,
4263 vt->type);
4264 param = btf_params(func_proto);
4265				/* Reuse the dummy_var name string if the
4266				 * func proto is missing a param name.
4267 */
4268 for (j = 0; j < btf_vlen(func_proto); j++)
4269 if (param[j].type && !param[j].name_off)
4270 param[j].name_off =
4271 dummy_var->name_off;
4272 vs->type = dummy_var_btf_id;
4273 vt->info &= ~0xffff;
4274 vt->info |= BTF_FUNC_GLOBAL;
4275 } else {
4276 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4277 vt->type = int_btf_id;
4278 }
4279 vs->offset = off;
4280 vs->size = sizeof(int);
4281 }
4282 sec->size = off;
4283 }
4284
4285 if (kcfg_sec) {
4286 sec = kcfg_sec;
4287 /* for kcfg externs calculate their offsets within a .kconfig map */
4288 off = 0;
4289 for (i = 0; i < obj->nr_extern; i++) {
4290 ext = &obj->externs[i];
4291 if (ext->type != EXT_KCFG)
4292 continue;
4293
4294 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4295 off = ext->kcfg.data_off + ext->kcfg.sz;
4296 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
4297 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
4298 }
4299 sec->size = off;
4300 n = btf_vlen(sec);
4301 for (i = 0; i < n; i++) {
4302 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4303
4304 t = btf__type_by_id(obj->btf, vs->type);
4305 ext_name = btf__name_by_offset(obj->btf, t->name_off);
4306 ext = find_extern_by_name(obj, ext_name);
4307 if (!ext) {
4308 pr_warn("failed to find extern definition for BTF var '%s'\n",
4309 ext_name);
4310 return -ESRCH;
4311 }
4312 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4313 vs->offset = ext->kcfg.data_off;
4314 }
4315 }
4316 return 0;
4317}
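
/* To summarize the .ksyms rewriting above with a sketch: an extern func
 *
 *	extern void bpf_rcu_read_lock(void) __ksym;
 *
 * keeps its FUNC type (forced to BTF_FUNC_GLOBAL linkage, with its
 * DATASEC slot redirected to the dummy int var), while an extern var is
 * downgraded to a plain allocated int-sized variable, so the resulting
 * BTF passes kernel verification.
 */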
4318
4319static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4320{
4321 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
4322}
4323
4324struct bpf_program *
4325bpf_object__find_program_by_name(const struct bpf_object *obj,
4326 const char *name)
4327{
4328 struct bpf_program *prog;
4329
4330 bpf_object__for_each_program(prog, obj) {
4331 if (prog_is_subprog(obj, prog))
4332 continue;
4333 if (!strcmp(prog->name, name))
4334 return prog;
4335 }
4336 return errno = ENOENT, NULL;
4337}
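
/* Usage sketch (the program name is illustrative):
 *
 *	struct bpf_program *prog;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_exec");
 *	if (!prog)
 *		return -errno;
 *
 * On failure errno is set to ENOENT; subprograms living in .text are
 * deliberately never returned.
 */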
4338
4339static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4340 int shndx)
4341{
4342 switch (obj->efile.secs[shndx].sec_type) {
4343 case SEC_BSS:
4344 case SEC_DATA:
4345 case SEC_RODATA:
4346 return true;
4347 default:
4348 return false;
4349 }
4350}
4351
4352static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4353 int shndx)
4354{
4355 return shndx == obj->efile.btf_maps_shndx;
4356}
4357
4358static enum libbpf_map_type
4359bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4360{
4361 if (shndx == obj->efile.symbols_shndx)
4362 return LIBBPF_MAP_KCONFIG;
4363
4364 switch (obj->efile.secs[shndx].sec_type) {
4365 case SEC_BSS:
4366 return LIBBPF_MAP_BSS;
4367 case SEC_DATA:
4368 return LIBBPF_MAP_DATA;
4369 case SEC_RODATA:
4370 return LIBBPF_MAP_RODATA;
4371 default:
4372 return LIBBPF_MAP_UNSPEC;
4373 }
4374}
4375
4376static int bpf_program__record_reloc(struct bpf_program *prog,
4377 struct reloc_desc *reloc_desc,
4378 __u32 insn_idx, const char *sym_name,
4379 const Elf64_Sym *sym, const Elf64_Rel *rel)
4380{
4381 struct bpf_insn *insn = &prog->insns[insn_idx];
4382 size_t map_idx, nr_maps = prog->obj->nr_maps;
4383 struct bpf_object *obj = prog->obj;
4384 __u32 shdr_idx = sym->st_shndx;
4385 enum libbpf_map_type type;
4386 const char *sym_sec_name;
4387 struct bpf_map *map;
4388
4389 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
4390 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
4391 prog->name, sym_name, insn_idx, insn->code);
4392 return -LIBBPF_ERRNO__RELOC;
4393 }
4394
4395 if (sym_is_extern(sym)) {
4396 int sym_idx = ELF64_R_SYM(rel->r_info);
4397 int i, n = obj->nr_extern;
4398 struct extern_desc *ext;
4399
4400 for (i = 0; i < n; i++) {
4401 ext = &obj->externs[i];
4402 if (ext->sym_idx == sym_idx)
4403 break;
4404 }
4405 if (i >= n) {
4406 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4407 prog->name, sym_name, sym_idx);
4408 return -LIBBPF_ERRNO__RELOC;
4409 }
4410 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4411 prog->name, i, ext->name, ext->sym_idx, insn_idx);
4412 if (insn->code == (BPF_JMP | BPF_CALL))
4413 reloc_desc->type = RELO_EXTERN_CALL;
4414 else
4415 reloc_desc->type = RELO_EXTERN_LD64;
4416 reloc_desc->insn_idx = insn_idx;
4417 reloc_desc->ext_idx = i;
4418 return 0;
4419 }
4420
4421 /* sub-program call relocation */
4422 if (is_call_insn(insn)) {
4423 if (insn->src_reg != BPF_PSEUDO_CALL) {
4424 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4425 return -LIBBPF_ERRNO__RELOC;
4426 }
4427		/* text_shndx can be 0 if no default "main" program exists */
4428 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4429 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4430 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4431 prog->name, sym_name, sym_sec_name);
4432 return -LIBBPF_ERRNO__RELOC;
4433 }
4434 if (sym->st_value % BPF_INSN_SZ) {
4435 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4436 prog->name, sym_name, (size_t)sym->st_value);
4437 return -LIBBPF_ERRNO__RELOC;
4438 }
4439 reloc_desc->type = RELO_CALL;
4440 reloc_desc->insn_idx = insn_idx;
4441 reloc_desc->sym_off = sym->st_value;
4442 return 0;
4443 }
4444
4445 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
4446 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4447 prog->name, sym_name, shdr_idx);
4448 return -LIBBPF_ERRNO__RELOC;
4449 }
4450
4451 /* loading subprog addresses */
4452 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4453 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4454 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4455 */
4456 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4457 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4458 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4459 return -LIBBPF_ERRNO__RELOC;
4460 }
4461
4462 reloc_desc->type = RELO_SUBPROG_ADDR;
4463 reloc_desc->insn_idx = insn_idx;
4464 reloc_desc->sym_off = sym->st_value;
4465 return 0;
4466 }
4467
4468 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4469 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4470
4471 /* arena data relocation */
4472 if (shdr_idx == obj->efile.arena_data_shndx) {
4473 reloc_desc->type = RELO_DATA;
4474 reloc_desc->insn_idx = insn_idx;
4475 reloc_desc->map_idx = obj->arena_map - obj->maps;
4476 reloc_desc->sym_off = sym->st_value;
4477 return 0;
4478 }
4479
4480 /* generic map reference relocation */
4481 if (type == LIBBPF_MAP_UNSPEC) {
4482 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4483 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4484 prog->name, sym_name, sym_sec_name);
4485 return -LIBBPF_ERRNO__RELOC;
4486 }
4487 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4488 map = &obj->maps[map_idx];
4489 if (map->libbpf_type != type ||
4490 map->sec_idx != sym->st_shndx ||
4491 map->sec_offset != sym->st_value)
4492 continue;
4493 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4494 prog->name, map_idx, map->name, map->sec_idx,
4495 map->sec_offset, insn_idx);
4496 break;
4497 }
4498 if (map_idx >= nr_maps) {
4499 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4500 prog->name, sym_sec_name, (size_t)sym->st_value);
4501 return -LIBBPF_ERRNO__RELOC;
4502 }
4503 reloc_desc->type = RELO_LD64;
4504 reloc_desc->insn_idx = insn_idx;
4505 reloc_desc->map_idx = map_idx;
4506 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4507 return 0;
4508 }
4509
4510 /* global data map relocation */
4511 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4512 pr_warn("prog '%s': bad data relo against section '%s'\n",
4513 prog->name, sym_sec_name);
4514 return -LIBBPF_ERRNO__RELOC;
4515 }
4516 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4517 map = &obj->maps[map_idx];
4518 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4519 continue;
4520 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4521 prog->name, map_idx, map->name, map->sec_idx,
4522 map->sec_offset, insn_idx);
4523 break;
4524 }
4525 if (map_idx >= nr_maps) {
4526 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4527 prog->name, sym_sec_name);
4528 return -LIBBPF_ERRNO__RELOC;
4529 }
4530
4531 reloc_desc->type = RELO_DATA;
4532 reloc_desc->insn_idx = insn_idx;
4533 reloc_desc->map_idx = map_idx;
4534 reloc_desc->sym_off = sym->st_value;
4535 return 0;
4536}
4537
4538static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4539{
4540 return insn_idx >= prog->sec_insn_off &&
4541 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4542}
4543
4544static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4545 size_t sec_idx, size_t insn_idx)
4546{
4547 int l = 0, r = obj->nr_programs - 1, m;
4548 struct bpf_program *prog;
4549
4550 if (!obj->nr_programs)
4551 return NULL;
4552
4553 while (l < r) {
4554 m = l + (r - l + 1) / 2;
4555 prog = &obj->programs[m];
4556
4557 if (prog->sec_idx < sec_idx ||
4558 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4559 l = m;
4560 else
4561 r = m - 1;
4562 }
4563 /* matching program could be at index l, but it still might be the
4564 * wrong one, so we need to double check conditions for the last time
4565 */
4566 prog = &obj->programs[l];
4567 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4568 return prog;
4569 return NULL;
4570}
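
/* A worked sketch of the biased binary search above: with three programs
 * in one section at sec_insn_off {0, 100, 200}, looking up insn_idx 150
 * converges on the program at offset 100 (the rightmost entry with
 * sec_insn_off <= 150); the final prog_contains_insn() check then
 * rejects it only if that program is 50 instructions or shorter.
 */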
4571
4572static int
4573bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4574{
4575 const char *relo_sec_name, *sec_name;
4576 size_t sec_idx = shdr->sh_info, sym_idx;
4577 struct bpf_program *prog;
4578 struct reloc_desc *relos;
4579 int err, i, nrels;
4580 const char *sym_name;
4581 __u32 insn_idx;
4582 Elf_Scn *scn;
4583 Elf_Data *scn_data;
4584 Elf64_Sym *sym;
4585 Elf64_Rel *rel;
4586
4587 if (sec_idx >= obj->efile.sec_cnt)
4588 return -EINVAL;
4589
4590 scn = elf_sec_by_idx(obj, sec_idx);
4591 scn_data = elf_sec_data(obj, scn);
4592 if (!scn_data)
4593 return -LIBBPF_ERRNO__FORMAT;
4594
4595 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4596 sec_name = elf_sec_name(obj, scn);
4597 if (!relo_sec_name || !sec_name)
4598 return -EINVAL;
4599
4600 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4601 relo_sec_name, sec_idx, sec_name);
4602 nrels = shdr->sh_size / shdr->sh_entsize;
4603
4604 for (i = 0; i < nrels; i++) {
4605 rel = elf_rel_by_idx(data, i);
4606 if (!rel) {
4607 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4608 return -LIBBPF_ERRNO__FORMAT;
4609 }
4610
4611 sym_idx = ELF64_R_SYM(rel->r_info);
4612 sym = elf_sym_by_idx(obj, sym_idx);
4613 if (!sym) {
4614 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4615 relo_sec_name, sym_idx, i);
4616 return -LIBBPF_ERRNO__FORMAT;
4617 }
4618
4619 if (sym->st_shndx >= obj->efile.sec_cnt) {
4620 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4621 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4622 return -LIBBPF_ERRNO__FORMAT;
4623 }
4624
4625 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4626 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4627 relo_sec_name, (size_t)rel->r_offset, i);
4628 return -LIBBPF_ERRNO__FORMAT;
4629 }
4630
4631 insn_idx = rel->r_offset / BPF_INSN_SZ;
4632		/* relocations against static functions are recorded as
4633		 * relocations against the section that contains the function;
4634		 * in such a case, the symbol will be STT_SECTION and sym.st_name
4635		 * will point to the empty string (0), so fetch the section name
4636		 * instead
4637 */
4638 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4639 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4640 else
4641 sym_name = elf_sym_str(obj, sym->st_name);
4642		sym_name = sym_name ?: "<?>";
4643
4644 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4645 relo_sec_name, i, insn_idx, sym_name);
4646
4647 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4648 if (!prog) {
4649 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4650 relo_sec_name, i, sec_name, insn_idx);
4651 continue;
4652 }
4653
4654 relos = libbpf_reallocarray(prog->reloc_desc,
4655 prog->nr_reloc + 1, sizeof(*relos));
4656 if (!relos)
4657 return -ENOMEM;
4658 prog->reloc_desc = relos;
4659
4660 /* adjust insn_idx to local BPF program frame of reference */
4661 insn_idx -= prog->sec_insn_off;
4662 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4663 insn_idx, sym_name, sym, rel);
4664 if (err)
4665 return err;
4666
4667 prog->nr_reloc++;
4668 }
4669 return 0;
4670}
4671
4672static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
4673{
4674 int id;
4675
4676 if (!obj->btf)
4677 return -ENOENT;
4678
4679	/* if it's a BTF-defined map, we don't need to search for type IDs.
4680	 * A struct_ops map needs neither btf_key_type_id nor
4681	 * btf_value_type_id.
4682 */
4683 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4684 return 0;
4685
4686 /*
4687 * LLVM annotates global data differently in BTF, that is,
4688 * only as '.data', '.bss' or '.rodata'.
4689 */
4690 if (!bpf_map__is_internal(map))
4691 return -ENOENT;
4692
4693 id = btf__find_by_name(obj->btf, map->real_name);
4694 if (id < 0)
4695 return id;
4696
4697 map->btf_key_type_id = 0;
4698 map->btf_value_type_id = id;
4699 return 0;
4700}
4701
4702static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4703{
4704 char file[PATH_MAX], buff[4096];
4705 FILE *fp;
4706 __u32 val;
4707 int err;
4708
4709 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4710 memset(info, 0, sizeof(*info));
4711
4712 fp = fopen(file, "re");
4713 if (!fp) {
4714 err = -errno;
4715 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4716 err);
4717 return err;
4718 }
4719
4720 while (fgets(buff, sizeof(buff), fp)) {
4721 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4722 info->type = val;
4723 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4724 info->key_size = val;
4725 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4726 info->value_size = val;
4727 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4728 info->max_entries = val;
4729 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4730 info->map_flags = val;
4731 }
4732
4733 fclose(fp);
4734
4735 return 0;
4736}
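
/* For reference, the fdinfo file parsed above looks roughly like this
 * for a BPF map (a sketch; exact fields vary by kernel version):
 *
 *	pos:	0
 *	flags:	02000002
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	128
 *	map_flags:	0x0
 */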
4737
4738bool bpf_map__autocreate(const struct bpf_map *map)
4739{
4740 return map->autocreate;
4741}
4742
4743int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4744{
4745 if (map->obj->loaded)
4746 return libbpf_err(-EBUSY);
4747
4748 map->autocreate = autocreate;
4749 return 0;
4750}
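
/* Usage sketch: opt a map out of auto-creation before load (the map
 * name is illustrative):
 *
 *	struct bpf_map *map;
 *
 *	map = bpf_object__find_map_by_name(obj, "unused_map");
 *	if (map)
 *		bpf_map__set_autocreate(map, false);
 *
 * This has to happen before bpf_object__load(obj); afterwards the
 * setter fails with -EBUSY.
 */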
4751
4752int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4753{
4754 struct bpf_map_info info;
4755 __u32 len = sizeof(info), name_len;
4756 int new_fd, err;
4757 char *new_name;
4758
4759 memset(&info, 0, len);
4760 err = bpf_map_get_info_by_fd(fd, &info, &len);
4761 if (err && errno == EINVAL)
4762 err = bpf_get_map_info_from_fdinfo(fd, &info);
4763 if (err)
4764 return libbpf_err(err);
4765
4766 name_len = strlen(info.name);
4767 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4768 new_name = strdup(map->name);
4769 else
4770 new_name = strdup(info.name);
4771
4772 if (!new_name)
4773 return libbpf_err(-errno);
4774
4775 /*
4776 * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set.
4777 * This is similar to what we do in ensure_good_fd(), but without
4778 * closing original FD.
4779 */
4780 new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
4781 if (new_fd < 0) {
4782 err = -errno;
4783 goto err_free_new_name;
4784 }
4785
4786 err = reuse_fd(map->fd, new_fd);
4787 if (err)
4788 goto err_free_new_name;
4789
4790 free(map->name);
4791
4792 map->name = new_name;
4793 map->def.type = info.type;
4794 map->def.key_size = info.key_size;
4795 map->def.value_size = info.value_size;
4796 map->def.max_entries = info.max_entries;
4797 map->def.map_flags = info.map_flags;
4798 map->btf_key_type_id = info.btf_key_type_id;
4799 map->btf_value_type_id = info.btf_value_type_id;
4800 map->reused = true;
4801 map->map_extra = info.map_extra;
4802
4803 return 0;
4804
4805err_free_new_name:
4806 free(new_name);
4807 return libbpf_err(err);
4808}
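
/* Usage sketch: reuse an already pinned map instead of creating a new
 * one (the pin path is illustrative):
 *
 *	int err, pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (pin_fd < 0)
 *		return -errno;
 *	err = bpf_map__reuse_fd(map, pin_fd);
 *	close(pin_fd);
 *
 * bpf_map__reuse_fd() dup()s the FD internally, so pin_fd can be closed
 * right away.
 */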
4809
4810__u32 bpf_map__max_entries(const struct bpf_map *map)
4811{
4812 return map->def.max_entries;
4813}
4814
4815struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4816{
4817 if (!bpf_map_type__is_map_in_map(map->def.type))
4818 return errno = EINVAL, NULL;
4819
4820 return map->inner_map;
4821}
4822
4823int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4824{
4825 if (map->obj->loaded)
4826 return libbpf_err(-EBUSY);
4827
4828 map->def.max_entries = max_entries;
4829
4830 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
4831 if (map_is_ringbuf(map))
4832 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
4833
4834 return 0;
4835}
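
/* Usage sketch: resize a BPF ringbuf before load; as noted above, the
 * requested size gets rounded up to a multiple of the page size
 * (skel->maps.rb is an illustrative skeleton handle):
 *
 *	err = bpf_map__set_max_entries(skel->maps.rb, 512 * 1024);
 */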
4836
4837static int bpf_object_prepare_token(struct bpf_object *obj)
4838{
4839 const char *bpffs_path;
4840 int bpffs_fd = -1, token_fd, err;
4841 bool mandatory;
4842 enum libbpf_print_level level;
4843
4844 /* token is explicitly prevented */
4845 if (obj->token_path && obj->token_path[0] == '\0') {
4846 pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
4847 return 0;
4848 }
4849
4850 mandatory = obj->token_path != NULL;
4851 level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
4852
4853 bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
4854	bpffs_fd = open(bpffs_path, O_DIRECTORY | O_RDONLY);
4855 if (bpffs_fd < 0) {
4856 err = -errno;
4857 __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
4858 obj->name, err, bpffs_path,
4859 mandatory ? "" : ", skipping optional step...");
4860 return mandatory ? err : 0;
4861 }
4862
4863 token_fd = bpf_token_create(bpffs_fd, 0);
4864 close(bpffs_fd);
4865 if (token_fd < 0) {
4866 if (!mandatory && token_fd == -ENOENT) {
4867 pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
4868 obj->name, bpffs_path);
4869 return 0;
4870 }
4871 __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
4872 obj->name, token_fd, bpffs_path,
4873 mandatory ? "" : ", skipping optional step...");
4874 return mandatory ? token_fd : 0;
4875 }
4876
4877 obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
4878 if (!obj->feat_cache) {
4879 close(token_fd);
4880 return -ENOMEM;
4881 }
4882
4883 obj->token_fd = token_fd;
4884 obj->feat_cache->token_fd = token_fd;
4885
4886 return 0;
4887}
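
/* For the optional token step above to succeed, the BPF FS instance has
 * to be mounted with delegation options, e.g. (a sketch, typically done
 * by a privileged container manager):
 *
 *	mount -t bpf -o delegate_cmds=any,delegate_maps=any,\
 *		delegate_progs=any,delegate_attachs=any bpffs /sys/fs/bpf
 */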
4888
4889static int
4890bpf_object__probe_loading(struct bpf_object *obj)
4891{
4892 char *cp, errmsg[STRERR_BUFSIZE];
4893 struct bpf_insn insns[] = {
4894 BPF_MOV64_IMM(BPF_REG_0, 0),
4895 BPF_EXIT_INSN(),
4896 };
4897 int ret, insn_cnt = ARRAY_SIZE(insns);
4898 LIBBPF_OPTS(bpf_prog_load_opts, opts,
4899 .token_fd = obj->token_fd,
4900 .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
4901 );
4902
4903 if (obj->gen_loader)
4904 return 0;
4905
4906 ret = bump_rlimit_memlock();
4907 if (ret)
4908 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4909
4910 /* make sure basic loading works */
4911 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
4912 if (ret < 0)
4913 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
4914 if (ret < 0) {
4915 ret = errno;
4916 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4917 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4918 "program. Make sure your kernel supports BPF "
4919 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4920			"set to a big enough value.\n", __func__, cp, ret);
4921 return -ret;
4922 }
4923 close(ret);
4924
4925 return 0;
4926}
4927
4928bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4929{
4930 if (obj->gen_loader)
4931		/* To generate a loader program, assume the latest kernel
4932		 * to avoid doing extra prog_load and map_create syscalls.
4933 */
4934 return true;
4935
4936 if (obj->token_fd)
4937 return feat_supported(obj->feat_cache, feat_id);
4938
4939 return feat_supported(NULL, feat_id);
4940}
4941
4942static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4943{
4944 struct bpf_map_info map_info;
4945 char msg[STRERR_BUFSIZE];
4946 __u32 map_info_len = sizeof(map_info);
4947 int err;
4948
4949 memset(&map_info, 0, map_info_len);
4950 err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
4951 if (err && errno == EINVAL)
4952 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4953 if (err) {
4954 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4955 libbpf_strerror_r(errno, msg, sizeof(msg)));
4956 return false;
4957 }
4958
4959 return (map_info.type == map->def.type &&
4960 map_info.key_size == map->def.key_size &&
4961 map_info.value_size == map->def.value_size &&
4962 map_info.max_entries == map->def.max_entries &&
4963 map_info.map_flags == map->def.map_flags &&
4964 map_info.map_extra == map->map_extra);
4965}
4966
4967static int
4968bpf_object__reuse_map(struct bpf_map *map)
4969{
4970 char *cp, errmsg[STRERR_BUFSIZE];
4971 int err, pin_fd;
4972
4973 pin_fd = bpf_obj_get(map->pin_path);
4974 if (pin_fd < 0) {
4975 err = -errno;
4976 if (err == -ENOENT) {
4977 pr_debug("found no pinned map to reuse at '%s'\n",
4978 map->pin_path);
4979 return 0;
4980 }
4981
4982 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4983 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4984 map->pin_path, cp);
4985 return err;
4986 }
4987
4988 if (!map_is_reuse_compat(map, pin_fd)) {
4989 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4990 map->pin_path);
4991 close(pin_fd);
4992 return -EINVAL;
4993 }
4994
4995 err = bpf_map__reuse_fd(map, pin_fd);
4996 close(pin_fd);
4997 if (err)
4998 return err;
4999
5000 map->pinned = true;
5001 pr_debug("reused pinned map at '%s'\n", map->pin_path);
5002
5003 return 0;
5004}
5005
5006static int
5007bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
5008{
5009 enum libbpf_map_type map_type = map->libbpf_type;
5010 char *cp, errmsg[STRERR_BUFSIZE];
5011 int err, zero = 0;
5012
5013 if (obj->gen_loader) {
5014 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
5015 map->mmaped, map->def.value_size);
5016 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
5017 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
5018 return 0;
5019 }
5020
5021 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
5022 if (err) {
5023 err = -errno;
5024 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5025 pr_warn("Error setting initial map(%s) contents: %s\n",
5026 map->name, cp);
5027 return err;
5028 }
5029
5030 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
5031 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
5032 err = bpf_map_freeze(map->fd);
5033 if (err) {
5034 err = -errno;
5035 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5036 pr_warn("Error freezing map(%s) as read-only: %s\n",
5037 map->name, cp);
5038 return err;
5039 }
5040 }
5041 return 0;
5042}
5043
5044static void bpf_map__destroy(struct bpf_map *map);
5045
5046static bool map_is_created(const struct bpf_map *map)
5047{
5048 return map->obj->loaded || map->reused;
5049}
5050
5051static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
5052{
5053 LIBBPF_OPTS(bpf_map_create_opts, create_attr);
5054 struct bpf_map_def *def = &map->def;
5055 const char *map_name = NULL;
5056 int err = 0, map_fd;
5057
5058 if (kernel_supports(obj, FEAT_PROG_NAME))
5059 map_name = map->name;
5060 create_attr.map_ifindex = map->map_ifindex;
5061 create_attr.map_flags = def->map_flags;
5062 create_attr.numa_node = map->numa_node;
5063 create_attr.map_extra = map->map_extra;
5064 create_attr.token_fd = obj->token_fd;
5065 if (obj->token_fd)
5066 create_attr.map_flags |= BPF_F_TOKEN_FD;
5067
5068 if (bpf_map__is_struct_ops(map)) {
5069 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5070 if (map->mod_btf_fd >= 0) {
5071 create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
5072 create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
5073 }
5074 }
5075
5076 if (obj->btf && btf__fd(obj->btf) >= 0) {
5077 create_attr.btf_fd = btf__fd(obj->btf);
5078 create_attr.btf_key_type_id = map->btf_key_type_id;
5079 create_attr.btf_value_type_id = map->btf_value_type_id;
5080 }
5081
5082 if (bpf_map_type__is_map_in_map(def->type)) {
5083 if (map->inner_map) {
5084 err = map_set_def_max_entries(map->inner_map);
5085 if (err)
5086 return err;
5087 err = bpf_object__create_map(obj, map->inner_map, true);
5088 if (err) {
5089 pr_warn("map '%s': failed to create inner map: %d\n",
5090 map->name, err);
5091 return err;
5092 }
5093 map->inner_map_fd = map->inner_map->fd;
5094 }
5095 if (map->inner_map_fd >= 0)
5096 create_attr.inner_map_fd = map->inner_map_fd;
5097 }
5098
5099 switch (def->type) {
5100 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5101 case BPF_MAP_TYPE_CGROUP_ARRAY:
5102 case BPF_MAP_TYPE_STACK_TRACE:
5103 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5104 case BPF_MAP_TYPE_HASH_OF_MAPS:
5105 case BPF_MAP_TYPE_DEVMAP:
5106 case BPF_MAP_TYPE_DEVMAP_HASH:
5107 case BPF_MAP_TYPE_CPUMAP:
5108 case BPF_MAP_TYPE_XSKMAP:
5109 case BPF_MAP_TYPE_SOCKMAP:
5110 case BPF_MAP_TYPE_SOCKHASH:
5111 case BPF_MAP_TYPE_QUEUE:
5112 case BPF_MAP_TYPE_STACK:
5113 case BPF_MAP_TYPE_ARENA:
5114 create_attr.btf_fd = 0;
5115 create_attr.btf_key_type_id = 0;
5116 create_attr.btf_value_type_id = 0;
5117 map->btf_key_type_id = 0;
5118 map->btf_value_type_id = 0;
5119 break;
5120 case BPF_MAP_TYPE_STRUCT_OPS:
5121 create_attr.btf_value_type_id = 0;
5122 break;
5123 default:
5124 break;
5125 }
5126
5127 if (obj->gen_loader) {
5128 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5129 def->key_size, def->value_size, def->max_entries,
5130 &create_attr, is_inner ? -1 : map - obj->maps);
5131		/* We keep pretending we have a valid FD to pass various fd >= 0
5132 * checks by just keeping original placeholder FDs in place.
5133 * See bpf_object__add_map() comment.
5134 * This placeholder fd will not be used with any syscall and
5135 * will be reset to -1 eventually.
5136 */
5137 map_fd = map->fd;
5138 } else {
5139 map_fd = bpf_map_create(def->type, map_name,
5140 def->key_size, def->value_size,
5141 def->max_entries, &create_attr);
5142 }
5143 if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
5144 char *cp, errmsg[STRERR_BUFSIZE];
5145
5146 err = -errno;
5147 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5148		pr_warn("Error in bpf_map_create(%s):%s(%d). Retrying without BTF.\n",
5149 map->name, cp, err);
5150 create_attr.btf_fd = 0;
5151 create_attr.btf_key_type_id = 0;
5152 create_attr.btf_value_type_id = 0;
5153 map->btf_key_type_id = 0;
5154 map->btf_value_type_id = 0;
5155 map_fd = bpf_map_create(def->type, map_name,
5156 def->key_size, def->value_size,
5157 def->max_entries, &create_attr);
5158 }
5159
5160 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5161 if (obj->gen_loader)
5162 map->inner_map->fd = -1;
5163 bpf_map__destroy(map->inner_map);
5164 zfree(&map->inner_map);
5165 }
5166
5167 if (map_fd < 0)
5168 return map_fd;
5169
5170 /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
5171 if (map->fd == map_fd)
5172 return 0;
5173
5174 /* Keep placeholder FD value but now point it to the BPF map object.
5175 * This way everything that relied on this map's FD (e.g., relocated
5176 * ldimm64 instructions) will stay valid and won't need adjustments.
5177	 * map->fd stays valid but now points to what map_fd pointed to.
5178 */
5179 return reuse_fd(map->fd, map_fd);
5180}
5181
5182static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5183{
5184 const struct bpf_map *targ_map;
5185 unsigned int i;
5186 int fd, err = 0;
5187
5188 for (i = 0; i < map->init_slots_sz; i++) {
5189 if (!map->init_slots[i])
5190 continue;
5191
5192 targ_map = map->init_slots[i];
5193 fd = targ_map->fd;
5194
5195 if (obj->gen_loader) {
5196 bpf_gen__populate_outer_map(obj->gen_loader,
5197 map - obj->maps, i,
5198 targ_map - obj->maps);
5199 } else {
5200 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5201 }
5202 if (err) {
5203 err = -errno;
5204 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5205 map->name, i, targ_map->name, fd, err);
5206 return err;
5207 }
5208 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5209 map->name, i, targ_map->name, fd);
5210 }
5211
5212 zfree(&map->init_slots);
5213 map->init_slots_sz = 0;
5214
5215 return 0;
5216}
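
/* The init_slots filled in above come from BTF-defined map-in-map
 * declarations; a minimal BPF-side sketch:
 *
 *	struct inner_map {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner_map1 SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 3);
 *		__type(key, int);
 *		__array(values, struct inner_map);
 *	} outer_arr SEC(".maps") = {
 *		.values = { [0] = &inner_map1 },
 *	};
 */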
5217
5218static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5219{
5220 const struct bpf_program *targ_prog;
5221 unsigned int i;
5222 int fd, err;
5223
5224 if (obj->gen_loader)
5225 return -ENOTSUP;
5226
5227 for (i = 0; i < map->init_slots_sz; i++) {
5228 if (!map->init_slots[i])
5229 continue;
5230
5231 targ_prog = map->init_slots[i];
5232 fd = bpf_program__fd(targ_prog);
5233
5234 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5235 if (err) {
5236 err = -errno;
5237 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5238 map->name, i, targ_prog->name, fd, err);
5239 return err;
5240 }
5241 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5242 map->name, i, targ_prog->name, fd);
5243 }
5244
5245 zfree(&map->init_slots);
5246 map->init_slots_sz = 0;
5247
5248 return 0;
5249}
5250
5251static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5252{
5253 struct bpf_map *map;
5254 int i, err;
5255
5256 for (i = 0; i < obj->nr_maps; i++) {
5257 map = &obj->maps[i];
5258
5259 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5260 continue;
5261
5262 err = init_prog_array_slots(obj, map);
5263 if (err < 0)
5264 return err;
5265 }
5266 return 0;
5267}
5268
5269static int map_set_def_max_entries(struct bpf_map *map)
5270{
5271 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5272 int nr_cpus;
5273
5274 nr_cpus = libbpf_num_possible_cpus();
5275 if (nr_cpus < 0) {
5276 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5277 map->name, nr_cpus);
5278 return nr_cpus;
5279 }
5280 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5281 map->def.max_entries = nr_cpus;
5282 }
5283
5284 return 0;
5285}
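
/* E.g., a perf event array declared without max_entries on the BPF side
 * gets auto-sized to the number of possible CPUs (a sketch):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} events SEC(".maps");
 */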
5286
5287static int
5288bpf_object__create_maps(struct bpf_object *obj)
5289{
5290 struct bpf_map *map;
5291 char *cp, errmsg[STRERR_BUFSIZE];
5292 unsigned int i, j;
5293 int err;
5294 bool retried;
5295
5296 for (i = 0; i < obj->nr_maps; i++) {
5297 map = &obj->maps[i];
5298
5299 /* To support old kernels, we skip creating global data maps
5300 * (.rodata, .data, .kconfig, etc); later on, during program
5301 * loading, if we detect that at least one of the to-be-loaded
5302 * programs is referencing any global data map, we'll error
5303 * out with program name and relocation index logged.
5304		 * This approach allows us to accommodate Clang emitting
5305		 * unnecessary .rodata.str1.1 sections for string literals,
5306		 * and also allows CO-RE applications to use global
5307		 * variables in some BPF programs, but not in others.
5308 * If those global variable-using programs are not loaded at
5309 * runtime due to bpf_program__set_autoload(prog, false),
5310 * bpf_object loading will succeed just fine even on old
5311 * kernels.
5312 */
5313 if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5314 map->autocreate = false;
5315
5316 if (!map->autocreate) {
5317 pr_debug("map '%s': skipped auto-creating...\n", map->name);
5318 continue;
5319 }
5320
5321 err = map_set_def_max_entries(map);
5322 if (err)
5323 goto err_out;
5324
5325 retried = false;
5326retry:
5327 if (map->pin_path) {
5328 err = bpf_object__reuse_map(map);
5329 if (err) {
5330 pr_warn("map '%s': error reusing pinned map\n",
5331 map->name);
5332 goto err_out;
5333 }
5334 if (retried && map->fd < 0) {
5335 pr_warn("map '%s': cannot find pinned map\n",
5336 map->name);
5337 err = -ENOENT;
5338 goto err_out;
5339 }
5340 }
5341
5342 if (map->reused) {
5343 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5344 map->name, map->fd);
5345 } else {
5346 err = bpf_object__create_map(obj, map, false);
5347 if (err)
5348 goto err_out;
5349
5350 pr_debug("map '%s': created successfully, fd=%d\n",
5351 map->name, map->fd);
5352
5353 if (bpf_map__is_internal(map)) {
5354 err = bpf_object__populate_internal_map(obj, map);
5355 if (err < 0)
5356 goto err_out;
5357 }
5358 if (map->def.type == BPF_MAP_TYPE_ARENA) {
5359 map->mmaped = mmap((void *)(long)map->map_extra,
5360 bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
5361 map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
5362 map->fd, 0);
5363 if (map->mmaped == MAP_FAILED) {
5364 err = -errno;
5365 map->mmaped = NULL;
5366 pr_warn("map '%s': failed to mmap arena: %d\n",
5367 map->name, err);
5368 return err;
5369 }
5370 if (obj->arena_data) {
5371 memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
5372 zfree(&obj->arena_data);
5373 }
5374 }
5375 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5376 err = init_map_in_map_slots(obj, map);
5377 if (err < 0)
5378 goto err_out;
5379 }
5380 }
5381
5382 if (map->pin_path && !map->pinned) {
5383 err = bpf_map__pin(map, NULL);
5384 if (err) {
5385 if (!retried && err == -EEXIST) {
5386 retried = true;
5387 goto retry;
5388 }
5389 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5390 map->name, map->pin_path, err);
5391 goto err_out;
5392 }
5393 }
5394 }
5395
5396 return 0;
5397
5398err_out:
5399 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5400 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5401 pr_perm_msg(err);
5402 for (j = 0; j < i; j++)
5403 zclose(obj->maps[j].fd);
5404 return err;
5405}
5406
5407static bool bpf_core_is_flavor_sep(const char *s)
5408{
5409 /* check X___Y name pattern, where X and Y are not underscores */
5410 return s[0] != '_' && /* X */
5411 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
5412 s[4] != '_'; /* Y */
5413}
5414
5415/* Given 'some_struct_name___with_flavor' return the length of the name
5416 * prefix before the last triple underscore. The struct name part after the
5417 * last triple underscore is ignored by BPF CO-RE during relocation matching.
5418 */
5419size_t bpf_core_essential_name_len(const char *name)
5420{
5421 size_t n = strlen(name);
5422 int i;
5423
5424 for (i = n - 5; i >= 0; i--) {
5425 if (bpf_core_is_flavor_sep(name + i))
5426 return i + 1;
5427 }
5428 return n;
5429}
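
/* E.g., for "task_struct___with_extra_field" this returns
 * strlen("task_struct"), so CO-RE matching treats the flavored local
 * type as plain "task_struct"; for a name with no triple underscore the
 * full length is returned.
 */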
5430
5431void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5432{
5433 if (!cands)
5434 return;
5435
5436 free(cands->cands);
5437 free(cands);
5438}
5439
5440int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5441 size_t local_essent_len,
5442 const struct btf *targ_btf,
5443 const char *targ_btf_name,
5444 int targ_start_id,
5445 struct bpf_core_cand_list *cands)
5446{
5447 struct bpf_core_cand *new_cands, *cand;
5448 const struct btf_type *t, *local_t;
5449 const char *targ_name, *local_name;
5450 size_t targ_essent_len;
5451 int n, i;
5452
5453 local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5454 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5455
5456 n = btf__type_cnt(targ_btf);
5457 for (i = targ_start_id; i < n; i++) {
5458 t = btf__type_by_id(targ_btf, i);
5459 if (!btf_kind_core_compat(t, local_t))
5460 continue;
5461
5462 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5463 if (str_is_empty(targ_name))
5464 continue;
5465
5466 targ_essent_len = bpf_core_essential_name_len(targ_name);
5467 if (targ_essent_len != local_essent_len)
5468 continue;
5469
5470 if (strncmp(local_name, targ_name, local_essent_len) != 0)
5471 continue;
5472
5473 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5474 local_cand->id, btf_kind_str(local_t),
5475 local_name, i, btf_kind_str(t), targ_name,
5476 targ_btf_name);
5477 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5478 sizeof(*cands->cands));
5479 if (!new_cands)
5480 return -ENOMEM;
5481
5482 cand = &new_cands[cands->len];
5483 cand->btf = targ_btf;
5484 cand->id = i;
5485
5486 cands->cands = new_cands;
5487 cands->len++;
5488 }
5489 return 0;
5490}
5491
5492static int load_module_btfs(struct bpf_object *obj)
5493{
5494 struct bpf_btf_info info;
5495 struct module_btf *mod_btf;
5496 struct btf *btf;
5497 char name[64];
5498 __u32 id = 0, len;
5499 int err, fd;
5500
5501 if (obj->btf_modules_loaded)
5502 return 0;
5503
5504 if (obj->gen_loader)
5505 return 0;
5506
5507 /* don't do this again, even if we find no module BTFs */
5508 obj->btf_modules_loaded = true;
5509
5510 /* kernel too old to support module BTFs */
5511 if (!kernel_supports(obj, FEAT_MODULE_BTF))
5512 return 0;
5513
5514 while (true) {
5515 err = bpf_btf_get_next_id(id, &id);
5516 if (err && errno == ENOENT)
5517 return 0;
5518 if (err && errno == EPERM) {
5519 pr_debug("skipping module BTFs loading, missing privileges\n");
5520 return 0;
5521 }
5522 if (err) {
5523 err = -errno;
5524 pr_warn("failed to iterate BTF objects: %d\n", err);
5525 return err;
5526 }
5527
5528 fd = bpf_btf_get_fd_by_id(id);
5529 if (fd < 0) {
5530 if (errno == ENOENT)
5531 continue; /* expected race: BTF was unloaded */
5532 err = -errno;
5533 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5534 return err;
5535 }
5536
5537 len = sizeof(info);
5538 memset(&info, 0, sizeof(info));
5539 info.name = ptr_to_u64(name);
5540 info.name_len = sizeof(name);
5541
5542 err = bpf_btf_get_info_by_fd(fd, &info, &len);
5543 if (err) {
5544 err = -errno;
5545 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5546 goto err_out;
5547 }
5548
5549 /* ignore non-module BTFs */
5550 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5551 close(fd);
5552 continue;
5553 }
5554
5555 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5556 err = libbpf_get_error(btf);
5557 if (err) {
5558 pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5559 name, id, err);
5560 goto err_out;
5561 }
5562
5563 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5564 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5565 if (err)
5566 goto err_out;
5567
5568 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5569
5570 mod_btf->btf = btf;
5571 mod_btf->id = id;
5572 mod_btf->fd = fd;
5573 mod_btf->name = strdup(name);
5574 if (!mod_btf->name) {
5575 err = -ENOMEM;
5576 goto err_out;
5577 }
5578 continue;
5579
5580err_out:
5581 close(fd);
5582 return err;
5583 }
5584
5585 return 0;
5586}
5587
5588static struct bpf_core_cand_list *
5589bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5590{
5591 struct bpf_core_cand local_cand = {};
5592 struct bpf_core_cand_list *cands;
5593 const struct btf *main_btf;
5594 const struct btf_type *local_t;
5595 const char *local_name;
5596 size_t local_essent_len;
5597 int err, i;
5598
5599 local_cand.btf = local_btf;
5600 local_cand.id = local_type_id;
5601 local_t = btf__type_by_id(local_btf, local_type_id);
5602 if (!local_t)
5603 return ERR_PTR(-EINVAL);
5604
5605 local_name = btf__name_by_offset(local_btf, local_t->name_off);
5606 if (str_is_empty(local_name))
5607 return ERR_PTR(-EINVAL);
5608 local_essent_len = bpf_core_essential_name_len(local_name);
5609
5610 cands = calloc(1, sizeof(*cands));
5611 if (!cands)
5612 return ERR_PTR(-ENOMEM);
5613
5614 /* Attempt to find target candidates in vmlinux BTF first */
5615 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5616 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5617 if (err)
5618 goto err_out;
5619
5620	/* if vmlinux BTF has any candidate, don't go for module BTFs */
5621 if (cands->len)
5622 return cands;
5623
5624 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5625 if (obj->btf_vmlinux_override)
5626 return cands;
5627
5628	/* now look through module BTFs, still trying to find candidates */
5629 err = load_module_btfs(obj);
5630 if (err)
5631 goto err_out;
5632
5633 for (i = 0; i < obj->btf_module_cnt; i++) {
5634 err = bpf_core_add_cands(&local_cand, local_essent_len,
5635 obj->btf_modules[i].btf,
5636 obj->btf_modules[i].name,
5637 btf__type_cnt(obj->btf_vmlinux),
5638 cands);
5639 if (err)
5640 goto err_out;
5641 }
5642
5643 return cands;
5644err_out:
5645 bpf_core_free_cands(cands);
5646 return ERR_PTR(err);
5647}
5648
5649/* Check local and target types for compatibility. This check is used for
5650 * type-based CO-RE relocations and follows slightly different rules from
5651 * field-based relocations. This function assumes that root types were already
5652 * checked for name match. Beyond that initial root-level name check, names
5653 * are completely ignored. Compatibility rules are as follows:
5654 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5655 * kind should match for local and target types (i.e., STRUCT is not
5656 * compatible with UNION);
5657 * - for ENUMs, the size is ignored;
5658 * - for INT, size and signedness are ignored;
5659 * - for ARRAY, dimensionality is ignored, element types are checked for
5660 * compatibility recursively;
5661 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
5662 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5663 * - FUNC_PROTOs are compatible if they have compatible signature: same
5664 * number of input args and compatible return and argument types.
5665 * These rules are not set in stone and probably will be adjusted as we get
5666 * more experience with using BPF CO-RE relocations.
5667 */
5668int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5669 const struct btf *targ_btf, __u32 targ_id)
5670{
5671 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
5672}
5673
5674int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
5675 const struct btf *targ_btf, __u32 targ_id)
5676{
5677 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
5678}
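
/* Minimal sketch of the rules above (made-up types, not from any real
 * BTF):
 *
 *   local:  struct foo { int x; };
 *   target: struct foo { long x; void *y; };
 *
 * are compatible for type-based relocations (same STRUCT kind; members
 * are not examined), while a local `struct foo` against a target
 * `union foo` is not, since the kinds differ.
 */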
5679
5680static size_t bpf_core_hash_fn(const long key, void *ctx)
5681{
5682 return key;
5683}
5684
5685static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
5686{
5687 return k1 == k2;
5688}
5689
5690static int record_relo_core(struct bpf_program *prog,
5691 const struct bpf_core_relo *core_relo, int insn_idx)
5692{
5693 struct reloc_desc *relos, *relo;
5694
5695 relos = libbpf_reallocarray(prog->reloc_desc,
5696 prog->nr_reloc + 1, sizeof(*relos));
5697 if (!relos)
5698 return -ENOMEM;
5699 relo = &relos[prog->nr_reloc];
5700 relo->type = RELO_CORE;
5701 relo->insn_idx = insn_idx;
5702 relo->core_relo = core_relo;
5703 prog->reloc_desc = relos;
5704 prog->nr_reloc++;
5705 return 0;
5706}
5707
5708static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5709{
5710 struct reloc_desc *relo;
5711 int i;
5712
5713 for (i = 0; i < prog->nr_reloc; i++) {
5714 relo = &prog->reloc_desc[i];
5715 if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5716 continue;
5717
5718 return relo->core_relo;
5719 }
5720
5721 return NULL;
5722}
5723
5724static int bpf_core_resolve_relo(struct bpf_program *prog,
5725 const struct bpf_core_relo *relo,
5726 int relo_idx,
5727 const struct btf *local_btf,
5728 struct hashmap *cand_cache,
5729 struct bpf_core_relo_res *targ_res)
5730{
5731 struct bpf_core_spec specs_scratch[3] = {};
5732 struct bpf_core_cand_list *cands = NULL;
5733 const char *prog_name = prog->name;
5734 const struct btf_type *local_type;
5735 const char *local_name;
5736 __u32 local_id = relo->type_id;
5737 int err;
5738
5739 local_type = btf__type_by_id(local_btf, local_id);
5740 if (!local_type)
5741 return -EINVAL;
5742
5743 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5744 if (!local_name)
5745 return -EINVAL;
5746
5747 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5748 !hashmap__find(cand_cache, local_id, &cands)) {
5749 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5750 if (IS_ERR(cands)) {
5751 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5752 prog_name, relo_idx, local_id, btf_kind_str(local_type),
5753 local_name, PTR_ERR(cands));
5754 return PTR_ERR(cands);
5755 }
5756 err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
5757 if (err) {
5758 bpf_core_free_cands(cands);
5759 return err;
5760 }
5761 }
5762
5763 return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5764 targ_res);
5765}
5766
5767static int
5768bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5769{
5770 const struct btf_ext_info_sec *sec;
5771 struct bpf_core_relo_res targ_res;
5772 const struct bpf_core_relo *rec;
5773 const struct btf_ext_info *seg;
5774 struct hashmap_entry *entry;
5775 struct hashmap *cand_cache = NULL;
5776 struct bpf_program *prog;
5777 struct bpf_insn *insn;
5778 const char *sec_name;
5779 int i, err = 0, insn_idx, sec_idx, sec_num;
5780
5781 if (obj->btf_ext->core_relo_info.len == 0)
5782 return 0;
5783
5784 if (targ_btf_path) {
5785 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5786 err = libbpf_get_error(obj->btf_vmlinux_override);
5787 if (err) {
5788 pr_warn("failed to parse target BTF: %d\n", err);
5789 return err;
5790 }
5791 }
5792
5793 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5794 if (IS_ERR(cand_cache)) {
5795 err = PTR_ERR(cand_cache);
5796 goto out;
5797 }
5798
5799 seg = &obj->btf_ext->core_relo_info;
5800 sec_num = 0;
5801 for_each_btf_ext_sec(seg, sec) {
5802 sec_idx = seg->sec_idxs[sec_num];
5803 sec_num++;
5804
5805 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5806 if (str_is_empty(sec_name)) {
5807 err = -EINVAL;
5808 goto out;
5809 }
5810
5811 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
5812
5813 for_each_btf_ext_rec(seg, sec, i, rec) {
5814 if (rec->insn_off % BPF_INSN_SZ)
5815 return -EINVAL;
5816 insn_idx = rec->insn_off / BPF_INSN_SZ;
5817 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5818 if (!prog) {
5819 /* When __weak subprog is "overridden" by another instance
5820 * of the subprog from a different object file, linker still
5821 * appends all the .BTF.ext info that used to belong to that
5822 * eliminated subprogram.
5823 * This is similar to what x86-64 linker does for relocations.
5824				 * So ignore such relocations, just like we ignore
5825 * subprog instructions when discovering subprograms.
5826 */
5827 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
5828 sec_name, i, insn_idx);
5829 continue;
5830 }
5831 /* no need to apply CO-RE relocation if the program is
5832 * not going to be loaded
5833 */
5834 if (!prog->autoload)
5835 continue;
5836
5837 /* adjust insn_idx from section frame of reference to the local
5838 * program's frame of reference; (sub-)program code is not yet
5839 * relocated, so it's enough to just subtract in-section offset
5840 */
5841 insn_idx = insn_idx - prog->sec_insn_off;
5842 if (insn_idx >= prog->insns_cnt)
5843 return -EINVAL;
5844 insn = &prog->insns[insn_idx];
5845
5846 err = record_relo_core(prog, rec, insn_idx);
5847 if (err) {
5848 pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5849 prog->name, i, err);
5850 goto out;
5851 }
5852
5853 if (prog->obj->gen_loader)
5854 continue;
5855
5856 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
5857 if (err) {
5858 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5859 prog->name, i, err);
5860 goto out;
5861 }
5862
5863 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5864 if (err) {
5865 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5866 prog->name, i, insn_idx, err);
5867 goto out;
5868 }
5869 }
5870 }
5871
5872out:
5873 /* obj->btf_vmlinux and module BTFs are freed after object load */
5874 btf__free(obj->btf_vmlinux_override);
5875 obj->btf_vmlinux_override = NULL;
5876
5877 if (!IS_ERR_OR_NULL(cand_cache)) {
5878 hashmap__for_each_entry(cand_cache, entry, i) {
5879 bpf_core_free_cands(entry->pvalue);
5880 }
5881 hashmap__free(cand_cache);
5882 }
5883 return err;
5884}
5885
5886/* base map load ldimm64 special constant, used also for log fixup logic */
5887#define POISON_LDIMM64_MAP_BASE 2001000000
5888#define POISON_LDIMM64_MAP_PFX "200100"
5889
5890static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
5891 int insn_idx, struct bpf_insn *insn,
5892 int map_idx, const struct bpf_map *map)
5893{
5894 int i;
5895
5896 pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
5897 prog->name, relo_idx, insn_idx, map_idx, map->name);
5898
5899 /* we turn single ldimm64 into two identical invalid calls */
5900 for (i = 0; i < 2; i++) {
5901 insn->code = BPF_JMP | BPF_CALL;
5902 insn->dst_reg = 0;
5903 insn->src_reg = 0;
5904 insn->off = 0;
5905		/* if this instruction is reachable (i.e., not dead code),
5906		 * the verifier will complain with something like:
5907		 * invalid func unknown#2001000123
5908		 * where the lower 123 is the map index into obj->maps[] array
5909 */
5910 insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
5911
5912 insn++;
5913 }
5914}
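
/* Hypothetical example of the poisoning above: for map_idx == 7, both
 * halves of the ldimm64 become `call unknown#2001000007`. If the insn
 * is ever reached, the verifier rejects the program, and the log fixup
 * logic matches the POISON_LDIMM64_MAP_PFX ("200100") prefix to report
 * which map in obj->maps[] failed to be created instead.
 */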
5915
5916/* unresolved kfunc call special constant, used also for log fixup logic */
5917#define POISON_CALL_KFUNC_BASE 2002000000
5918#define POISON_CALL_KFUNC_PFX "2002"
5919
5920static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
5921 int insn_idx, struct bpf_insn *insn,
5922 int ext_idx, const struct extern_desc *ext)
5923{
5924 pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
5925 prog->name, relo_idx, insn_idx, ext->name);
5926
5927 /* we turn kfunc call into invalid helper call with identifiable constant */
5928 insn->code = BPF_JMP | BPF_CALL;
5929 insn->dst_reg = 0;
5930 insn->src_reg = 0;
5931 insn->off = 0;
5932	/* if this instruction is reachable (i.e., not dead code),
5933	 * the verifier will complain with something like:
5934	 * invalid func unknown#2002000123
5935	 * where the lower 123 is the extern index into obj->externs[] array
5936 */
5937 insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
5938}
5939
5940/* Relocate data references within program code:
5941 * - map references;
5942 * - global variable references;
5943 * - extern references.
5944 */
5945static int
5946bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5947{
5948 int i;
5949
5950 for (i = 0; i < prog->nr_reloc; i++) {
5951 struct reloc_desc *relo = &prog->reloc_desc[i];
5952 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5953 const struct bpf_map *map;
5954 struct extern_desc *ext;
5955
5956 switch (relo->type) {
5957 case RELO_LD64:
5958 map = &obj->maps[relo->map_idx];
5959 if (obj->gen_loader) {
5960 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5961 insn[0].imm = relo->map_idx;
5962 } else if (map->autocreate) {
5963 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5964 insn[0].imm = map->fd;
5965 } else {
5966 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5967 relo->map_idx, map);
5968 }
5969 break;
5970 case RELO_DATA:
5971 map = &obj->maps[relo->map_idx];
5972 insn[1].imm = insn[0].imm + relo->sym_off;
5973 if (obj->gen_loader) {
5974 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5975 insn[0].imm = relo->map_idx;
5976 } else if (map->autocreate) {
5977 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5978 insn[0].imm = map->fd;
5979 } else {
5980 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5981 relo->map_idx, map);
5982 }
5983 break;
5984 case RELO_EXTERN_LD64:
5985 ext = &obj->externs[relo->ext_idx];
5986 if (ext->type == EXT_KCFG) {
5987 if (obj->gen_loader) {
5988 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5989 insn[0].imm = obj->kconfig_map_idx;
5990 } else {
5991 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5992 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5993 }
5994 insn[1].imm = ext->kcfg.data_off;
5995 } else /* EXT_KSYM */ {
5996 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
5997 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5998 insn[0].imm = ext->ksym.kernel_btf_id;
5999 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6000 } else { /* typeless ksyms or unresolved typed ksyms */
6001 insn[0].imm = (__u32)ext->ksym.addr;
6002 insn[1].imm = ext->ksym.addr >> 32;
6003 }
6004 }
6005 break;
6006 case RELO_EXTERN_CALL:
6007 ext = &obj->externs[relo->ext_idx];
6008 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
6009 if (ext->is_set) {
6010 insn[0].imm = ext->ksym.kernel_btf_id;
6011 insn[0].off = ext->ksym.btf_fd_idx;
6012 } else { /* unresolved weak kfunc call */
6013 poison_kfunc_call(prog, i, relo->insn_idx, insn,
6014 relo->ext_idx, ext);
6015 }
6016 break;
6017 case RELO_SUBPROG_ADDR:
6018 if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
6019 pr_warn("prog '%s': relo #%d: bad insn\n",
6020 prog->name, i);
6021 return -EINVAL;
6022 }
6023 /* handled already */
6024 break;
6025 case RELO_CALL:
6026 /* handled already */
6027 break;
6028 case RELO_CORE:
6029 /* will be handled by bpf_program_record_relos() */
6030 break;
6031 default:
6032 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6033 prog->name, i, relo->type);
6034 return -EINVAL;
6035 }
6036 }
6037
6038 return 0;
6039}
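
/* A sketch of what the RELO_LD64 handling above does to an ldimm64
 * insn pair, assuming a hypothetical map that was created with fd == 5:
 *
 *   before: insn[0] = { .code = BPF_LD | BPF_IMM | BPF_DW, .imm = 0 }
 *           insn[1] = { .imm = 0 }
 *   after:  insn[0].src_reg = BPF_PSEUDO_MAP_FD;
 *           insn[0].imm = 5;
 *
 * At verification time the kernel resolves the FD in insn[0].imm into
 * the actual map pointer.
 */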
6040
6041static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6042 const struct bpf_program *prog,
6043 const struct btf_ext_info *ext_info,
6044 void **prog_info, __u32 *prog_rec_cnt,
6045 __u32 *prog_rec_sz)
6046{
6047 void *copy_start = NULL, *copy_end = NULL;
6048 void *rec, *rec_end, *new_prog_info;
6049 const struct btf_ext_info_sec *sec;
6050 size_t old_sz, new_sz;
6051 int i, sec_num, sec_idx, off_adj;
6052
6053 sec_num = 0;
6054 for_each_btf_ext_sec(ext_info, sec) {
6055 sec_idx = ext_info->sec_idxs[sec_num];
6056 sec_num++;
6057 if (prog->sec_idx != sec_idx)
6058 continue;
6059
6060 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6061 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6062
6063 if (insn_off < prog->sec_insn_off)
6064 continue;
6065 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6066 break;
6067
6068 if (!copy_start)
6069 copy_start = rec;
6070 copy_end = rec + ext_info->rec_size;
6071 }
6072
6073 if (!copy_start)
6074 return -ENOENT;
6075
6076 /* append func/line info of a given (sub-)program to the main
6077 * program func/line info
6078 */
6079 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6080 new_sz = old_sz + (copy_end - copy_start);
6081 new_prog_info = realloc(*prog_info, new_sz);
6082 if (!new_prog_info)
6083 return -ENOMEM;
6084 *prog_info = new_prog_info;
6085 *prog_rec_cnt = new_sz / ext_info->rec_size;
6086 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6087
6088 /* Kernel instruction offsets are in units of 8-byte
6089 * instructions, while .BTF.ext instruction offsets generated
6090 * by Clang are in units of bytes. So convert Clang offsets
6091 * into kernel offsets and adjust offset according to program
6092 * relocated position.
6093 */
6094 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6095 rec = new_prog_info + old_sz;
6096 rec_end = new_prog_info + new_sz;
6097 for (; rec < rec_end; rec += ext_info->rec_size) {
6098 __u32 *insn_off = rec;
6099
6100 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6101 }
6102 *prog_rec_sz = ext_info->rec_size;
6103 return 0;
6104 }
6105
6106 return -ENOENT;
6107}
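
/* Worked example for the offset conversion above (made-up numbers): a
 * subprog at sec_insn_off == 100 appended to a main prog at
 * sub_insn_off == 40 gives off_adj == -60. A .BTF.ext record with a
 * Clang byte offset of 824 covers section insn #103 (824 / 8), which
 * becomes insn #43 (103 - 60) in the final program.
 */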
6108
6109static int
6110reloc_prog_func_and_line_info(const struct bpf_object *obj,
6111 struct bpf_program *main_prog,
6112 const struct bpf_program *prog)
6113{
6114 int err;
6115
6116 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6117 * support func/line info
6118 */
6119 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6120 return 0;
6121
6122 /* only attempt func info relocation if main program's func_info
6123 * relocation was successful
6124 */
6125 if (main_prog != prog && !main_prog->func_info)
6126 goto line_info;
6127
6128 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6129 &main_prog->func_info,
6130 &main_prog->func_info_cnt,
6131 &main_prog->func_info_rec_size);
6132 if (err) {
6133 if (err != -ENOENT) {
6134 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6135 prog->name, err);
6136 return err;
6137 }
6138 if (main_prog->func_info) {
6139 /*
6140				 * Some info has already been found, but the last
6141				 * btf_ext reloc failed, so we must error out.
6142 */
6143 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6144 return err;
6145 }
6146		/* We had a problem loading the very first info; ignore the rest. */
6147 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6148 prog->name);
6149 }
6150
6151line_info:
6152 /* don't relocate line info if main program's relocation failed */
6153 if (main_prog != prog && !main_prog->line_info)
6154 return 0;
6155
6156 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6157 &main_prog->line_info,
6158 &main_prog->line_info_cnt,
6159 &main_prog->line_info_rec_size);
6160 if (err) {
6161 if (err != -ENOENT) {
6162 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6163 prog->name, err);
6164 return err;
6165 }
6166 if (main_prog->line_info) {
6167 /*
6168				 * Some info has already been found, but the last
6169				 * btf_ext reloc failed, so we must error out.
6170 */
6171 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6172 return err;
6173 }
6174		/* We had a problem loading the very first info; ignore the rest. */
6175 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6176 prog->name);
6177 }
6178 return 0;
6179}
6180
6181static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6182{
6183 size_t insn_idx = *(const size_t *)key;
6184 const struct reloc_desc *relo = elem;
6185
6186 if (insn_idx == relo->insn_idx)
6187 return 0;
6188 return insn_idx < relo->insn_idx ? -1 : 1;
6189}
6190
6191static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6192{
6193 if (!prog->nr_reloc)
6194 return NULL;
6195 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6196 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6197}
6198
6199static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6200{
6201 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6202 struct reloc_desc *relos;
6203 int i;
6204
6205 if (main_prog == subprog)
6206 return 0;
6207 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6208 /* if new count is zero, reallocarray can return a valid NULL result;
6209 * in this case the previous pointer will be freed, so we *have to*
6210 * reassign old pointer to the new value (even if it's NULL)
6211 */
6212 if (!relos && new_cnt)
6213 return -ENOMEM;
6214 if (subprog->nr_reloc)
6215 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6216 sizeof(*relos) * subprog->nr_reloc);
6217
6218 for (i = main_prog->nr_reloc; i < new_cnt; i++)
6219 relos[i].insn_idx += subprog->sub_insn_off;
6220 /* After insn_idx adjustment the 'relos' array is still sorted
6221 * by insn_idx and doesn't break bsearch.
6222 */
6223 main_prog->reloc_desc = relos;
6224 main_prog->nr_reloc = new_cnt;
6225 return 0;
6226}
6227
6228static int
6229bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
6230 struct bpf_program *subprog)
6231{
6232 struct bpf_insn *insns;
6233 size_t new_cnt;
6234 int err;
6235
6236 subprog->sub_insn_off = main_prog->insns_cnt;
6237
6238 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6239 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6240 if (!insns) {
6241 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6242 return -ENOMEM;
6243 }
6244 main_prog->insns = insns;
6245 main_prog->insns_cnt = new_cnt;
6246
6247 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6248 subprog->insns_cnt * sizeof(*insns));
6249
6250 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6251 main_prog->name, subprog->insns_cnt, subprog->name);
6252
6253 /* The subprog insns are now appended. Append its relos too. */
6254 err = append_subprog_relos(main_prog, subprog);
6255 if (err)
6256 return err;
6257 return 0;
6258}
6259
6260static int
6261bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6262 struct bpf_program *prog)
6263{
6264 size_t sub_insn_idx, insn_idx;
6265 struct bpf_program *subprog;
6266 struct reloc_desc *relo;
6267 struct bpf_insn *insn;
6268 int err;
6269
6270 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6271 if (err)
6272 return err;
6273
6274 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6275 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6276 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6277 continue;
6278
6279 relo = find_prog_insn_relo(prog, insn_idx);
6280 if (relo && relo->type == RELO_EXTERN_CALL)
6281 /* kfunc relocations will be handled later
6282 * in bpf_object__relocate_data()
6283 */
6284 continue;
6285 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6286 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6287 prog->name, insn_idx, relo->type);
6288 return -LIBBPF_ERRNO__RELOC;
6289 }
6290 if (relo) {
6291 /* sub-program instruction index is a combination of
6292 * an offset of a symbol pointed to by relocation and
6293 * call instruction's imm field; for global functions,
6294 * call always has imm = -1, but for static functions
6295 * relocation is against STT_SECTION and insn->imm
6296 * points to a start of a static function
6297 *
6298 * for subprog addr relocation, the relo->sym_off + insn->imm is
6299 * the byte offset in the corresponding section.
6300 */
6301 if (relo->type == RELO_CALL)
6302 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6303 else
6304 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6305 } else if (insn_is_pseudo_func(insn)) {
6306 /*
6307 * RELO_SUBPROG_ADDR relo is always emitted even if both
6308 * functions are in the same section, so it shouldn't reach here.
6309 */
6310 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6311 prog->name, insn_idx);
6312 return -LIBBPF_ERRNO__RELOC;
6313 } else {
6314 /* if subprogram call is to a static function within
6315 * the same ELF section, there won't be any relocation
6316 * emitted, but it also means there is no additional
6317			 * offset necessary, insn->imm is relative to
6318 * instruction's original position within the section
6319 */
6320 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6321 }
6322
6323 /* we enforce that sub-programs should be in .text section */
6324 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6325 if (!subprog) {
6326 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6327 prog->name);
6328 return -LIBBPF_ERRNO__RELOC;
6329 }
6330
6331 /* if it's the first call instruction calling into this
6332 * subprogram (meaning this subprog hasn't been processed
6333 * yet) within the context of current main program:
6334		 *  - append it at the end of main program's instruction block;
6335		 *  - process it recursively, while current program is put on hold;
6336		 *  - if that subprogram calls some other not yet processed
6337		 *    subprogram, same thing will happen recursively until
6338		 *    there are no more unprocessed subprograms left to append
6339 * and relocate.
6340 */
6341 if (subprog->sub_insn_off == 0) {
6342 err = bpf_object__append_subprog_code(obj, main_prog, subprog);
6343 if (err)
6344 return err;
6345 err = bpf_object__reloc_code(obj, main_prog, subprog);
6346 if (err)
6347 return err;
6348 }
6349
6350 /* main_prog->insns memory could have been re-allocated, so
6351 * calculate pointer again
6352 */
6353 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6354 /* calculate correct instruction position within current main
6355 * prog; each main prog can have a different set of
6356 * subprograms appended (potentially in different order as
6357 * well), so position of any subprog can be different for
6358 * different main programs
6359 */
6360 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6361
6362 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6363 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6364 }
6365
6366 return 0;
6367}
6368
6369/*
6370 * Relocate sub-program calls.
6371 *
6372 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6373 * main prog) is processed separately. Each subprog (a non-entry function,
6374 * which can be called from either entry progs or other subprogs) gets its
6375 * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6376 * hasn't yet been appended and relocated within the current main prog. Once it's
6377 * relocated, sub_insn_off will point at the position within the current main prog
6378 * where the given subprog was appended. This is further used to relocate all
6379 * the call instructions jumping into this subprog.
6380 *
6381 * We start with main program and process all call instructions. If the call
6382 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6383 * is zero), subprog instructions are appended at the end of main program's
6384 * instruction array. Then main program is "put on hold" while we recursively
6385 * process newly appended subprogram. If that subprogram calls into another
6386 * subprogram that hasn't been appended, new subprogram is appended again to
6387 * the *main* prog's instructions (subprog's instructions are always left
6388 * untouched, as they need to be in unmodified state for subsequent main progs
6389 * and subprog instructions are always sent only as part of a main prog) and
6390 * the process continues recursively. Once all the subprogs called from a main
6391 * prog or any of its subprogs are appended (and relocated), all their
6392 * positions within finalized instructions array are known, so it's easy to
6393 * rewrite call instructions with correct relative offsets, corresponding to
6394 * desired target subprog.
6395 *
6396 * It's important to realize that some subprogs might not be called from some
6397 * main prog and any of its called/used subprogs. Those will keep their
6398 * subprog->sub_insn_off as zero at all times and won't be appended to current
6399 * main prog and won't be relocated within the context of current main prog.
6400 * They might still be used from other main progs later.
6401 *
6402 * Visually this process can be shown as below. Suppose we have two main
6403 * programs mainA and mainB and BPF object contains three subprogs: subA,
6404 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6405 * subC both call subB:
6406 *
6407 * +--------+ +-------+
6408 * | v v |
6409 * +--+---+ +--+-+-+ +---+--+
6410 * | subA | | subB | | subC |
6411 * +--+---+ +------+ +---+--+
6412 * ^ ^
6413 * | |
6414 * +---+-------+ +------+----+
6415 * | mainA | | mainB |
6416 * +-----------+ +-----------+
6417 *
6418 * We'll start relocating mainA, will find subA, append it and start
6419 * processing subA recursively:
6420 *
6421 * +-----------+------+
6422 * | mainA | subA |
6423 * +-----------+------+
6424 *
6425 * At this point we notice that subB is used from subA, so we append it and
6426 * relocate (there are no further subcalls from subB):
6427 *
6428 * +-----------+------+------+
6429 * | mainA | subA | subB |
6430 * +-----------+------+------+
6431 *
6432 * At this point, we relocate subA calls, then go one level up and finish with
6433 * relocating mainA calls. mainA is done.
6434 *
6435 * For mainB process is similar but results in different order. We start with
6436 * mainB and skip subA and subB, as mainB never calls them (at least
6437 * directly), but we see subC is needed, so we append and start processing it:
6438 *
6439 * +-----------+------+
6440 * | mainB | subC |
6441 * +-----------+------+
6442 * Now we see subC needs subB, so we go back to it, append and relocate it:
6443 *
6444 * +-----------+------+------+
6445 * | mainB | subC | subB |
6446 * +-----------+------+------+
6447 *
6448 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6449 */
6450static int
6451bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6452{
6453 struct bpf_program *subprog;
6454 int i, err;
6455
6456 /* mark all subprogs as not relocated (yet) within the context of
6457 * current main program
6458 */
6459 for (i = 0; i < obj->nr_programs; i++) {
6460 subprog = &obj->programs[i];
6461 if (!prog_is_subprog(obj, subprog))
6462 continue;
6463
6464 subprog->sub_insn_off = 0;
6465 }
6466
6467 err = bpf_object__reloc_code(obj, prog, prog);
6468 if (err)
6469 return err;
6470
6471 return 0;
6472}
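
/* Worked example of the call imm fixup in bpf_object__reloc_code(),
 * using made-up sizes for the mainA/subA/subB diagram above: if mainA
 * has 10 insns, subA is appended at sub_insn_off == 10 and subB at
 * sub_insn_off == 25, then for a call at insn #3 of subA (absolute
 * insn #13) imm = 25 - (10 + 3) - 1 = 11, i.e. the target is relative
 * to the instruction that follows the call: 13 + 1 + 11 == 25.
 */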
6473
6474static void
6475bpf_object__free_relocs(struct bpf_object *obj)
6476{
6477 struct bpf_program *prog;
6478 int i;
6479
6480 /* free up relocation descriptors */
6481 for (i = 0; i < obj->nr_programs; i++) {
6482 prog = &obj->programs[i];
6483 zfree(&prog->reloc_desc);
6484 prog->nr_reloc = 0;
6485 }
6486}
6487
6488static int cmp_relocs(const void *_a, const void *_b)
6489{
6490 const struct reloc_desc *a = _a;
6491 const struct reloc_desc *b = _b;
6492
6493 if (a->insn_idx != b->insn_idx)
6494 return a->insn_idx < b->insn_idx ? -1 : 1;
6495
6496 /* no two relocations should have the same insn_idx, but ... */
6497 if (a->type != b->type)
6498 return a->type < b->type ? -1 : 1;
6499
6500 return 0;
6501}
6502
6503static void bpf_object__sort_relos(struct bpf_object *obj)
6504{
6505 int i;
6506
6507 for (i = 0; i < obj->nr_programs; i++) {
6508 struct bpf_program *p = &obj->programs[i];
6509
6510 if (!p->nr_reloc)
6511 continue;
6512
6513 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6514 }
6515}
6516
6517static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog)
6518{
6519 const char *str = "exception_callback:";
6520 size_t pfx_len = strlen(str);
6521 int i, j, n;
6522
6523 if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
6524 return 0;
6525
6526 n = btf__type_cnt(obj->btf);
6527 for (i = 1; i < n; i++) {
6528 const char *name;
6529 struct btf_type *t;
6530
6531 t = btf_type_by_id(obj->btf, i);
6532 if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
6533 continue;
6534
6535 name = btf__str_by_offset(obj->btf, t->name_off);
6536 if (strncmp(name, str, pfx_len) != 0)
6537 continue;
6538
6539 t = btf_type_by_id(obj->btf, t->type);
6540 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
6541 pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
6542 prog->name);
6543 return -EINVAL;
6544 }
6545 if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
6546 continue;
6547		/* If multiple callbacks are specified for the same prog,
6548		 * the verifier will eventually return an error for this
6549		 * case, so simply skip appending a subprog.
6550 */
6551 if (prog->exception_cb_idx >= 0) {
6552 prog->exception_cb_idx = -1;
6553 break;
6554 }
6555
6556 name += pfx_len;
6557 if (str_is_empty(name)) {
6558 pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
6559 prog->name);
6560 return -EINVAL;
6561 }
6562
6563 for (j = 0; j < obj->nr_programs; j++) {
6564 struct bpf_program *subprog = &obj->programs[j];
6565
6566 if (!prog_is_subprog(obj, subprog))
6567 continue;
6568 if (strcmp(name, subprog->name) != 0)
6569 continue;
6570			/* Enforce non-hidden, as from the verifier's point of
6571			 * view it expects global functions, whereas
6572			 * mark_btf_static fixes up the linkage as static.
6573 */
6574 if (!subprog->sym_global || subprog->mark_btf_static) {
6575 pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
6576 prog->name, subprog->name);
6577 return -EINVAL;
6578 }
6579 /* Let's see if we already saw a static exception callback with the same name */
6580 if (prog->exception_cb_idx >= 0) {
6581 pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
6582 prog->name, subprog->name);
6583 return -EINVAL;
6584 }
6585 prog->exception_cb_idx = j;
6586 break;
6587 }
6588
6589 if (prog->exception_cb_idx >= 0)
6590 continue;
6591
6592 pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
6593 return -ENOENT;
6594 }
6595
6596 return 0;
6597}
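
/* For reference, the annotation resolved above looks roughly like this
 * on the BPF side (illustrative; selftests' bpf_experimental.h defines
 * __exception_cb(name) as btf_decl_tag("exception_callback:" #name)):
 *
 *   __noinline int my_exc_cb(u64 cookie)
 *   {
 *       return cookie;
 *   }
 *
 *   SEC("tc")
 *   __exception_cb(my_exc_cb)
 *   int prog(struct __sk_buff *ctx)
 *   {
 *       bpf_throw(1);
 *       return 0;
 *   }
 */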
6598
6599static struct {
6600 enum bpf_prog_type prog_type;
6601 const char *ctx_name;
6602} global_ctx_map[] = {
6603 { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" },
6604 { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" },
6605 { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" },
6606 { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" },
6607 { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" },
6608 { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" },
6609 { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" },
6610 { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" },
6611 { BPF_PROG_TYPE_LWT_IN, "__sk_buff" },
6612 { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" },
6613 { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" },
6614 { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" },
6615 { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" },
6616 { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" },
6617 { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" },
6618 { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" },
6619 { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" },
6620 { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" },
6621 { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" },
6622 { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" },
6623 { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" },
6624 { BPF_PROG_TYPE_SK_SKB, "__sk_buff" },
6625 { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" },
6626 { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" },
6627 { BPF_PROG_TYPE_XDP, "xdp_md" },
6628 /* all other program types don't have "named" context structs */
6629};
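
/* For reference, the kind of BPF-side declaration the __arg_ctx fixup
 * below targets (illustrative; bpf_helpers.h defines __arg_ctx as
 * btf_decl_tag("arg:ctx")):
 *
 *   __noinline int handle_ctx(void *ctx __arg_ctx)
 *   {
 *       return 0;
 *   }
 *
 *   SEC("xdp")
 *   int prog(struct xdp_md *ctx)
 *   {
 *       return handle_ctx(ctx);
 *   }
 *
 * On kernels without native arg:ctx support, libbpf clones handle_ctx's
 * BTF so that the argument reads `struct xdp_md *` instead of `void *`.
 */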
6630
6631/* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
6632 * for below __builtin_types_compatible_p() checks;
6633 * with this approach we don't need any extra arch-specific #ifdef guards
6634 */
6635struct pt_regs;
6636struct user_pt_regs;
6637struct user_regs_struct;
6638
6639static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
6640 const char *subprog_name, int arg_idx,
6641 int arg_type_id, const char *ctx_name)
6642{
6643 const struct btf_type *t;
6644 const char *tname;
6645
6646 /* check if existing parameter already matches verifier expectations */
6647 t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
6648 if (!btf_is_ptr(t))
6649 goto out_warn;
6650
6651 /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
6652 * and perf_event programs, so check this case early on and forget
6653 * about it for subsequent checks
6654 */
6655 while (btf_is_mod(t))
6656 t = btf__type_by_id(btf, t->type);
6657 if (btf_is_typedef(t) &&
6658 (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
6659 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6660 if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
6661 return false; /* canonical type for kprobe/perf_event */
6662 }
6663
6664 /* now we can ignore typedefs moving forward */
6665 t = skip_mods_and_typedefs(btf, t->type, NULL);
6666
6667 /* if it's `void *`, definitely fix up BTF info */
6668 if (btf_is_void(t))
6669 return true;
6670
6671 /* if it's already proper canonical type, no need to fix up */
6672 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6673 if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
6674 return false;
6675
6676 /* special cases */
6677 switch (prog->type) {
6678 case BPF_PROG_TYPE_KPROBE:
6679 /* `struct pt_regs *` is expected, but we need to fix up */
6680 if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6681 return true;
6682 break;
6683 case BPF_PROG_TYPE_PERF_EVENT:
6684 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6685 btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6686 return true;
6687 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6688 btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6689 return true;
6690 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6691 btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6692 return true;
6693 break;
6694 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6695 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6696 /* allow u64* as ctx */
6697 if (btf_is_int(t) && t->size == 8)
6698 return true;
6699 break;
6700 default:
6701 break;
6702 }
6703
6704out_warn:
6705 pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
6706 prog->name, subprog_name, arg_idx, ctx_name);
6707 return false;
6708}
6709
6710static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
6711{
6712 int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
6713 int i, err, arg_cnt, fn_name_off, linkage;
6714 struct btf_type *fn_t, *fn_proto_t, *t;
6715 struct btf_param *p;
6716
6717 /* caller already validated FUNC -> FUNC_PROTO validity */
6718 fn_t = btf_type_by_id(btf, orig_fn_id);
6719 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6720
6721 /* Note that each btf__add_xxx() operation invalidates
6722 * all btf_type and string pointers, so we need to be
6723 * very careful when cloning BTF types. BTF type
6724 * pointers have to be always refetched. And to avoid
6725	 * pointers always have to be refetched. And to avoid
6726 * add empty strings initially, then just fix up
6727 * name_off offsets in place. Offsets are stable for
6728 * existing strings, so that works out.
6729 */
6730 fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
6731 linkage = btf_func_linkage(fn_t);
6732 orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
6733 ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
6734 arg_cnt = btf_vlen(fn_proto_t);
6735
6736 /* clone FUNC_PROTO and its params */
6737 fn_proto_id = btf__add_func_proto(btf, ret_type_id);
6738 if (fn_proto_id < 0)
6739 return -EINVAL;
6740
6741 for (i = 0; i < arg_cnt; i++) {
6742 int name_off;
6743
6744 /* copy original parameter data */
6745 t = btf_type_by_id(btf, orig_proto_id);
6746 p = &btf_params(t)[i];
6747 name_off = p->name_off;
6748
6749 err = btf__add_func_param(btf, "", p->type);
6750 if (err)
6751 return err;
6752
6753 fn_proto_t = btf_type_by_id(btf, fn_proto_id);
6754 p = &btf_params(fn_proto_t)[i];
6755 p->name_off = name_off; /* use remembered str offset */
6756 }
6757
6758 /* clone FUNC now, btf__add_func() enforces non-empty name, so use
6759 * entry program's name as a placeholder, which we replace immediately
6760 * with original name_off
6761 */
6762 fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
6763 if (fn_id < 0)
6764 return -EINVAL;
6765
6766 fn_t = btf_type_by_id(btf, fn_id);
6767 fn_t->name_off = fn_name_off; /* reuse original string */
6768
6769 return fn_id;
6770}
6771
6772/* Check if main program or global subprog's function prototype has `arg:ctx`
6773 * argument tags, and, if necessary, substitute correct type to match what BPF
6774 * verifier would expect, taking into account specific program type. This
6775 * allows supporting the __arg_ctx tag transparently on old kernels that don't
6776 * yet have native support for it in the verifier, making user's life much
6777 * easier.
6778 */
6779static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
6780{
6781 const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
6782 struct bpf_func_info_min *func_rec;
6783 struct btf_type *fn_t, *fn_proto_t;
6784 struct btf *btf = obj->btf;
6785 const struct btf_type *t;
6786 struct btf_param *p;
6787 int ptr_id = 0, struct_id, tag_id, orig_fn_id;
6788 int i, n, arg_idx, arg_cnt, err, rec_idx;
6789 int *orig_ids;
6790
6791 /* no .BTF.ext, no problem */
6792 if (!obj->btf_ext || !prog->func_info)
6793 return 0;
6794
6795 /* don't do any fix ups if kernel natively supports __arg_ctx */
6796 if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
6797 return 0;
6798
6799 /* some BPF program types just don't have named context structs, so
6800 * this fallback mechanism doesn't work for them
6801 */
6802 for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) {
6803 if (global_ctx_map[i].prog_type != prog->type)
6804 continue;
6805 ctx_name = global_ctx_map[i].ctx_name;
6806 break;
6807 }
6808 if (!ctx_name)
6809 return 0;
6810
6811 /* remember original func BTF IDs to detect if we already cloned them */
6812 orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
6813 if (!orig_ids)
6814 return -ENOMEM;
6815 for (i = 0; i < prog->func_info_cnt; i++) {
6816 func_rec = prog->func_info + prog->func_info_rec_size * i;
6817 orig_ids[i] = func_rec->type_id;
6818 }
6819
6820 /* go through each DECL_TAG with "arg:ctx" and see if it points to one
6821 * of our subprogs; if yes and subprog is global and needs adjustment,
6822 * clone and adjust FUNC -> FUNC_PROTO combo
6823 */
6824 for (i = 1, n = btf__type_cnt(btf); i < n; i++) {
6825 /* only DECL_TAG with "arg:ctx" value are interesting */
6826 t = btf__type_by_id(btf, i);
6827 if (!btf_is_decl_tag(t))
6828 continue;
6829 if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
6830 continue;
6831
6832 /* only global funcs need adjustment, if at all */
6833 orig_fn_id = t->type;
6834 fn_t = btf_type_by_id(btf, orig_fn_id);
6835 if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL)
6836 continue;
6837
6838 /* sanity check FUNC -> FUNC_PROTO chain, just in case */
6839 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6840 if (!fn_proto_t || !btf_is_func_proto(fn_proto_t))
6841 continue;
6842
6843 /* find corresponding func_info record */
6844 func_rec = NULL;
6845 for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
6846 if (orig_ids[rec_idx] == t->type) {
6847 func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
6848 break;
6849 }
6850 }
6851 /* current main program doesn't call into this subprog */
6852 if (!func_rec)
6853 continue;
6854
6855 /* some more sanity checking of DECL_TAG */
6856 arg_cnt = btf_vlen(fn_proto_t);
6857 arg_idx = btf_decl_tag(t)->component_idx;
6858 if (arg_idx < 0 || arg_idx >= arg_cnt)
6859 continue;
6860
6861 /* check if we should fix up argument type */
6862 p = &btf_params(fn_proto_t)[arg_idx];
6863 fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
6864 if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
6865 continue;
6866
6867 /* clone fn/fn_proto, unless we already did it for another arg */
6868 if (func_rec->type_id == orig_fn_id) {
6869 int fn_id;
6870
6871 fn_id = clone_func_btf_info(btf, orig_fn_id, prog);
6872 if (fn_id < 0) {
6873 err = fn_id;
6874 goto err_out;
6875 }
6876
6877 /* point func_info record to a cloned FUNC type */
6878 func_rec->type_id = fn_id;
6879 }
6880
6881 /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument;
6882 * we do it just once per main BPF program, as all global
6883		 * funcs share the same program type, so we need only one PTR ->
6884		 * STRUCT type chain
6885 */
6886 if (ptr_id == 0) {
6887 struct_id = btf__add_struct(btf, ctx_name, 0);
6888 ptr_id = btf__add_ptr(btf, struct_id);
6889 if (ptr_id < 0 || struct_id < 0) {
6890 err = -EINVAL;
6891 goto err_out;
6892 }
6893 }
6894
6895 /* for completeness, clone DECL_TAG and point it to cloned param */
6896 tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
6897 if (tag_id < 0) {
6898 err = -EINVAL;
6899 goto err_out;
6900 }
6901
6902 /* all the BTF manipulations invalidated pointers, refetch them */
6903 fn_t = btf_type_by_id(btf, func_rec->type_id);
6904 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6905
6906 /* fix up type ID pointed to by param */
6907 p = &btf_params(fn_proto_t)[arg_idx];
6908 p->type = ptr_id;
6909 }
6910
6911 free(orig_ids);
6912 return 0;
6913err_out:
6914 free(orig_ids);
6915 return err;
6916}
6917
6918static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6919{
6920 struct bpf_program *prog;
6921 size_t i, j;
6922 int err;
6923
6924 if (obj->btf_ext) {
6925 err = bpf_object__relocate_core(obj, targ_btf_path);
6926 if (err) {
6927 pr_warn("failed to perform CO-RE relocations: %d\n",
6928 err);
6929 return err;
6930 }
6931 bpf_object__sort_relos(obj);
6932 }
6933
6934	/* Before relocating calls, pre-process relocations and mark
6935	 * the few ld_imm64 instructions that point to subprogs.
6936	 * Otherwise bpf_object__reloc_code() later would have to consider
6937	 * all ld_imm64 insns as relocation candidates. That would
6938	 * reduce relocation speed, since the number of find_prog_insn_relo()
6939	 * calls would increase and most of them would fail to find a relo.
6940 */
6941 for (i = 0; i < obj->nr_programs; i++) {
6942 prog = &obj->programs[i];
6943 for (j = 0; j < prog->nr_reloc; j++) {
6944 struct reloc_desc *relo = &prog->reloc_desc[j];
6945 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6946
6947 /* mark the insn, so it's recognized by insn_is_pseudo_func() */
6948 if (relo->type == RELO_SUBPROG_ADDR)
6949 insn[0].src_reg = BPF_PSEUDO_FUNC;
6950 }
6951 }
6952
6953 /* relocate subprogram calls and append used subprograms to main
6954 * programs; each copy of subprogram code needs to be relocated
6955 * differently for each main program, because its code location might
6956 * have changed.
6957 * Append subprog relos to main programs to allow data relos to be
6958 * processed after text is completely relocated.
6959 */
6960 for (i = 0; i < obj->nr_programs; i++) {
6961 prog = &obj->programs[i];
6962 /* sub-program's sub-calls are relocated within the context of
6963 * its main program only
6964 */
6965 if (prog_is_subprog(obj, prog))
6966 continue;
6967 if (!prog->autoload)
6968 continue;
6969
6970 err = bpf_object__relocate_calls(obj, prog);
6971 if (err) {
6972 pr_warn("prog '%s': failed to relocate calls: %d\n",
6973 prog->name, err);
6974 return err;
6975 }
6976
6977 err = bpf_prog_assign_exc_cb(obj, prog);
6978 if (err)
6979 return err;
6980 /* Now, also append exception callback if it has not been done already. */
6981 if (prog->exception_cb_idx >= 0) {
6982 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
6983
6984			/* Calling the exception callback directly is disallowed and
6985			 * the verifier will reject it later. In case it was processed
6986			 * already, we can skip this step; otherwise, for all other
6987			 * valid cases, we have to append the exception callback now.
6988 */
6989 if (subprog->sub_insn_off == 0) {
6990 err = bpf_object__append_subprog_code(obj, prog, subprog);
6991 if (err)
6992 return err;
6993 err = bpf_object__reloc_code(obj, prog, subprog);
6994 if (err)
6995 return err;
6996 }
6997 }
6998 }
6999 for (i = 0; i < obj->nr_programs; i++) {
7000 prog = &obj->programs[i];
7001 if (prog_is_subprog(obj, prog))
7002 continue;
7003 if (!prog->autoload)
7004 continue;
7005
7006 /* Process data relos for main programs */
7007 err = bpf_object__relocate_data(obj, prog);
7008 if (err) {
7009 pr_warn("prog '%s': failed to relocate data references: %d\n",
7010 prog->name, err);
7011 return err;
7012 }
7013
7014 /* Fix up .BTF.ext information, if necessary */
7015 err = bpf_program_fixup_func_info(obj, prog);
7016 if (err) {
7017 pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n",
7018 prog->name, err);
7019 return err;
7020 }
7021 }
7022
7023 return 0;
7024}
7025
7026static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
7027 Elf64_Shdr *shdr, Elf_Data *data);
7028
7029static int bpf_object__collect_map_relos(struct bpf_object *obj,
7030 Elf64_Shdr *shdr, Elf_Data *data)
7031{
7032 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
7033 int i, j, nrels, new_sz;
7034 const struct btf_var_secinfo *vi = NULL;
7035 const struct btf_type *sec, *var, *def;
7036 struct bpf_map *map = NULL, *targ_map = NULL;
7037 struct bpf_program *targ_prog = NULL;
7038 bool is_prog_array, is_map_in_map;
7039 const struct btf_member *member;
7040 const char *name, *mname, *type;
7041 unsigned int moff;
7042 Elf64_Sym *sym;
7043 Elf64_Rel *rel;
7044 void *tmp;
7045
7046 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7047 return -EINVAL;
7048 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7049 if (!sec)
7050 return -EINVAL;
7051
7052 nrels = shdr->sh_size / shdr->sh_entsize;
7053 for (i = 0; i < nrels; i++) {
7054 rel = elf_rel_by_idx(data, i);
7055 if (!rel) {
7056 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
7057 return -LIBBPF_ERRNO__FORMAT;
7058 }
7059
7060 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7061 if (!sym) {
7062 pr_warn(".maps relo #%d: symbol %zx not found\n",
7063 i, (size_t)ELF64_R_SYM(rel->r_info));
7064 return -LIBBPF_ERRNO__FORMAT;
7065 }
7066 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
7067
7068 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
7069 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
7070 (size_t)rel->r_offset, sym->st_name, name);
7071
7072 for (j = 0; j < obj->nr_maps; j++) {
7073 map = &obj->maps[j];
7074 if (map->sec_idx != obj->efile.btf_maps_shndx)
7075 continue;
7076
7077 vi = btf_var_secinfos(sec) + map->btf_var_idx;
7078 if (vi->offset <= rel->r_offset &&
7079 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
7080 break;
7081 }
7082 if (j == obj->nr_maps) {
7083 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7084 i, name, (size_t)rel->r_offset);
7085 return -EINVAL;
7086 }
7087
7088 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7089 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7090 type = is_map_in_map ? "map" : "prog";
7091 if (is_map_in_map) {
7092 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7093 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7094 i, name);
7095 return -LIBBPF_ERRNO__RELOC;
7096 }
7097 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7098 map->def.key_size != sizeof(int)) {
7099 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7100 i, map->name, sizeof(int));
7101 return -EINVAL;
7102 }
7103 targ_map = bpf_object__find_map_by_name(obj, name);
7104 if (!targ_map) {
7105 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
7106 i, name);
7107 return -ESRCH;
7108 }
7109 } else if (is_prog_array) {
7110 targ_prog = bpf_object__find_program_by_name(obj, name);
7111 if (!targ_prog) {
7112 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
7113 i, name);
7114 return -ESRCH;
7115 }
7116 if (targ_prog->sec_idx != sym->st_shndx ||
7117 targ_prog->sec_insn_off * 8 != sym->st_value ||
7118 prog_is_subprog(obj, targ_prog)) {
7119 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
7120 i, name);
7121 return -LIBBPF_ERRNO__RELOC;
7122 }
7123 } else {
7124 return -EINVAL;
7125 }
7126
7127 var = btf__type_by_id(obj->btf, vi->type);
7128 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7129 if (btf_vlen(def) == 0)
7130 return -EINVAL;
7131 member = btf_members(def) + btf_vlen(def) - 1;
7132 mname = btf__name_by_offset(obj->btf, member->name_off);
7133 if (strcmp(mname, "values"))
7134 return -EINVAL;
7135
7136 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
7137 if (rel->r_offset - vi->offset < moff)
7138 return -EINVAL;
7139
7140 moff = rel->r_offset - vi->offset - moff;
7141 /* here we use BPF pointer size, which is always 64 bit, as we
7142		 * are parsing an ELF that was built for the BPF target
7143 */
7144 if (moff % bpf_ptr_sz)
7145 return -EINVAL;
7146 moff /= bpf_ptr_sz;
7147 if (moff >= map->init_slots_sz) {
7148 new_sz = moff + 1;
7149 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
7150 if (!tmp)
7151 return -ENOMEM;
7152 map->init_slots = tmp;
7153 memset(map->init_slots + map->init_slots_sz, 0,
7154 (new_sz - map->init_slots_sz) * host_ptr_sz);
7155 map->init_slots_sz = new_sz;
7156 }
7157 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
7158
7159 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
7160 i, map->name, moff, type, name);
7161 }
7162
7163 return 0;
7164}
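
/* For reference, a BTF-defined map-in-map whose initializer produces
 * the .maps relocations parsed above (illustrative):
 *
 *   struct inner {
 *       __uint(type, BPF_MAP_TYPE_ARRAY);
 *       __uint(max_entries, 1);
 *       __type(key, int);
 *       __type(value, int);
 *   } inner_a SEC(".maps"), inner_b SEC(".maps");
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *       __uint(max_entries, 2);
 *       __type(key, int);
 *       __array(values, struct inner);
 *   } outer SEC(".maps") = {
 *       .values = { &inner_a, &inner_b },
 *   };
 *
 * Each &inner_X initializer shows up as an ELF relocation against the
 * "values" member and lands in map->init_slots[] above.
 */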
7165
7166static int bpf_object__collect_relos(struct bpf_object *obj)
7167{
7168 int i, err;
7169
7170 for (i = 0; i < obj->efile.sec_cnt; i++) {
7171 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7172 Elf64_Shdr *shdr;
7173 Elf_Data *data;
7174 int idx;
7175
7176 if (sec_desc->sec_type != SEC_RELO)
7177 continue;
7178
7179 shdr = sec_desc->shdr;
7180 data = sec_desc->data;
7181 idx = shdr->sh_info;
7182
7183 if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
7184 pr_warn("internal error at %d\n", __LINE__);
7185 return -LIBBPF_ERRNO__INTERNAL;
7186 }
7187
7188 if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
7189 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7190 else if (idx == obj->efile.btf_maps_shndx)
7191 err = bpf_object__collect_map_relos(obj, shdr, data);
7192 else
7193 err = bpf_object__collect_prog_relos(obj, shdr, data);
7194 if (err)
7195 return err;
7196 }
7197
7198 bpf_object__sort_relos(obj);
7199 return 0;
7200}
7201
7202static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7203{
7204 if (BPF_CLASS(insn->code) == BPF_JMP &&
7205 BPF_OP(insn->code) == BPF_CALL &&
7206 BPF_SRC(insn->code) == BPF_K &&
7207 insn->src_reg == 0 &&
7208 insn->dst_reg == 0) {
7209 *func_id = insn->imm;
7210 return true;
7211 }
7212 return false;
7213}
7214
7215static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7216{
7217 struct bpf_insn *insn = prog->insns;
7218 enum bpf_func_id func_id;
7219 int i;
7220
7221 if (obj->gen_loader)
7222 return 0;
7223
7224 for (i = 0; i < prog->insns_cnt; i++, insn++) {
7225 if (!insn_is_helper_call(insn, &func_id))
7226 continue;
7227
7228 /* on kernels that don't yet support
7229 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7230 * to bpf_probe_read() which works well for old kernels
7231 */
7232 switch (func_id) {
7233 case BPF_FUNC_probe_read_kernel:
7234 case BPF_FUNC_probe_read_user:
7235 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7236 insn->imm = BPF_FUNC_probe_read;
7237 break;
7238 case BPF_FUNC_probe_read_kernel_str:
7239 case BPF_FUNC_probe_read_user_str:
7240 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7241 insn->imm = BPF_FUNC_probe_read_str;
7242 break;
7243 default:
7244 break;
7245 }
7246 }
7247 return 0;
7248}
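
/* Example of the sanitization above: on a pre-5.5 kernel without
 * FEAT_PROBE_READ_KERN, a call to bpf_probe_read_kernel(dst, sz, ptr)
 * is rewritten in place to bpf_probe_read(dst, sz, ptr), which has the
 * same argument layout, by patching only insn->imm.
 */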
7249
7250static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
7251 int *btf_obj_fd, int *btf_type_id);
7252
7253/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7254static int libbpf_prepare_prog_load(struct bpf_program *prog,
7255 struct bpf_prog_load_opts *opts, long cookie)
7256{
7257 enum sec_def_flags def = cookie;
7258
7259 /* old kernels might not support specifying expected_attach_type */
7260 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
7261 opts->expected_attach_type = 0;
7262
7263 if (def & SEC_SLEEPABLE)
7264 opts->prog_flags |= BPF_F_SLEEPABLE;
7265
7266 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7267 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7268
7269 /* special check for usdt to use uprobe_multi link */
7270 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
7271 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7272
7273 if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
7274 int btf_obj_fd = 0, btf_type_id = 0, err;
7275 const char *attach_name;
7276
7277 attach_name = strchr(prog->sec_name, '/');
7278 if (!attach_name) {
7279 /* if BPF program is annotated with just SEC("fentry")
7280 * (or similar) without declaratively specifying
7281 * target, then it is expected that target will be
7282 * specified with bpf_program__set_attach_target() at
7283 * runtime before BPF object load step. If not, then
7284 * there is nothing to load into the kernel as BPF
7285 * verifier won't be able to validate BPF program
7286 * correctness anyways.
7287			 * correctness anyway.
7288 pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7289 prog->name);
7290 return -EINVAL;
7291 }
7292 attach_name++; /* skip over / */
7293
7294 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
7295 if (err)
7296 return err;
7297
7298 /* cache resolved BTF FD and BTF type ID in the prog */
7299 prog->attach_btf_obj_fd = btf_obj_fd;
7300 prog->attach_btf_id = btf_type_id;
7301
7302 /* but by now libbpf common logic is not utilizing
7303		 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
7304 * this callback is called after opts were populated by
7305 * libbpf, so this callback has to update opts explicitly here
7306 */
7307 opts->attach_btf_obj_fd = btf_obj_fd;
7308 opts->attach_btf_id = btf_type_id;
7309 }
7310 return 0;
7311}
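
/* Example of the SEC_ATTACH_BTF path above: a program annotated with
 * SEC("fentry/tcp_v4_connect") yields attach_name "tcp_v4_connect",
 * which libbpf_find_attach_btf_id() resolves to the BTF type ID of the
 * kernel's tcp_v4_connect() in vmlinux (or module) BTF, cached in
 * prog->attach_btf_id and passed on via opts.
 */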
7312
7313static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
7314
7315static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7316 struct bpf_insn *insns, int insns_cnt,
7317 const char *license, __u32 kern_version, int *prog_fd)
7318{
7319 LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
7320 const char *prog_name = NULL;
7321 char *cp, errmsg[STRERR_BUFSIZE];
7322 size_t log_buf_size = 0;
7323 char *log_buf = NULL, *tmp;
7324 bool own_log_buf = true;
7325 __u32 log_level = prog->log_level;
7326 int ret, err;
7327
7328 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
7329 /*
7330 * The program type must be set. Most likely we couldn't find a proper
7331 * section definition at load time, and thus we didn't infer the type.
7332 */
7333 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7334 prog->name, prog->sec_name);
7335 return -EINVAL;
7336 }
7337
7338 if (!insns || !insns_cnt)
7339 return -EINVAL;
7340
7341 if (kernel_supports(obj, FEAT_PROG_NAME))
7342 prog_name = prog->name;
7343 load_attr.attach_prog_fd = prog->attach_prog_fd;
7344 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7345 load_attr.attach_btf_id = prog->attach_btf_id;
7346 load_attr.kern_version = kern_version;
7347 load_attr.prog_ifindex = prog->prog_ifindex;
7348
7349 /* specify func_info/line_info only if kernel supports them */
7350 if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7351 load_attr.prog_btf_fd = btf__fd(obj->btf);
7352 load_attr.func_info = prog->func_info;
7353 load_attr.func_info_rec_size = prog->func_info_rec_size;
7354 load_attr.func_info_cnt = prog->func_info_cnt;
7355 load_attr.line_info = prog->line_info;
7356 load_attr.line_info_rec_size = prog->line_info_rec_size;
7357 load_attr.line_info_cnt = prog->line_info_cnt;
7358 }
7359 load_attr.log_level = log_level;
7360 load_attr.prog_flags = prog->prog_flags;
7361 load_attr.fd_array = obj->fd_array;
7362
7363 load_attr.token_fd = obj->token_fd;
7364 if (obj->token_fd)
7365 load_attr.prog_flags |= BPF_F_TOKEN_FD;
7366
7367 /* adjust load_attr if sec_def provides custom preload callback */
7368 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7369 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
7370 if (err < 0) {
7371 pr_warn("prog '%s': failed to prepare load attributes: %d\n",
7372 prog->name, err);
7373 return err;
7374 }
7375 insns = prog->insns;
7376 insns_cnt = prog->insns_cnt;
7377 }
7378
7379 /* allow prog_prepare_load_fn to change expected_attach_type */
7380 load_attr.expected_attach_type = prog->expected_attach_type;
7381
7382 if (obj->gen_loader) {
7383 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7384 license, insns, insns_cnt, &load_attr,
7385 prog - obj->programs);
7386 *prog_fd = -1;
7387 return 0;
7388 }
7389
7390retry_load:
7391 /* if log_level is zero, we don't request logs initially even if
7392 * custom log_buf is specified; if the program load fails, then we'll
7393 * bump log_level to 1 and use either custom log_buf or we'll allocate
7394 * our own and retry the load to get details on what failed
7395 */
7396 if (log_level) {
7397 if (prog->log_buf) {
7398 log_buf = prog->log_buf;
7399 log_buf_size = prog->log_size;
7400 own_log_buf = false;
7401 } else if (obj->log_buf) {
7402 log_buf = obj->log_buf;
7403 log_buf_size = obj->log_size;
7404 own_log_buf = false;
7405 } else {
7406 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
7407 tmp = realloc(log_buf, log_buf_size);
7408 if (!tmp) {
7409 ret = -ENOMEM;
7410 goto out;
7411 }
7412 log_buf = tmp;
7413 log_buf[0] = '\0';
7414 own_log_buf = true;
7415 }
7416 }
7417
7418 load_attr.log_buf = log_buf;
7419 load_attr.log_size = log_buf_size;
7420 load_attr.log_level = log_level;
7421
7422 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
7423 if (ret >= 0) {
7424 if (log_level && own_log_buf) {
7425 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7426 prog->name, log_buf);
7427 }
7428
7429 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7430 struct bpf_map *map;
7431 int i;
7432
7433 for (i = 0; i < obj->nr_maps; i++) {
7434 map = &prog->obj->maps[i];
7435 if (map->libbpf_type != LIBBPF_MAP_RODATA)
7436 continue;
7437
7438 if (bpf_prog_bind_map(ret, map->fd, NULL)) {
7439 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7440 pr_warn("prog '%s': failed to bind map '%s': %s\n",
7441 prog->name, map->real_name, cp);
7442 /* Don't fail hard if can't bind rodata. */
7443 }
7444 }
7445 }
7446
7447 *prog_fd = ret;
7448 ret = 0;
7449 goto out;
7450 }
7451
7452 if (log_level == 0) {
7453 log_level = 1;
7454 goto retry_load;
7455 }
7456 /* On ENOSPC, increase log buffer size and retry, unless custom
7457 * log_buf is specified.
7458 * Be careful to not overflow u32, though. Kernel's log buf size limit
7459 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
7460 * multiply by 2 unless we are sure we'll fit within 32 bits.
7461 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7462 */
7463 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
7464 goto retry_load;
7465
7466 ret = -errno;
7467
7468 /* post-process verifier log to improve error descriptions */
7469 fixup_verifier_log(prog, log_buf, log_buf_size);
7470
7471 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7472 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
7473 pr_perm_msg(ret);
7474
7475 if (own_log_buf && log_buf && log_buf[0] != '\0') {
7476 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7477 prog->name, log_buf);
7478 }
7479
7480out:
7481 if (own_log_buf)
7482 free(log_buf);
7483 return ret;
7484}
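
/* Worked example of the retry flow above (sizes are libbpf implementation
 * details, shown for illustration): the first attempt goes out with
 * log_level == 0 and no buffer; if it fails, we retry with log_level = 1
 * and an owned buffer of BPF_LOG_BUF_SIZE bytes; each further -ENOSPC
 * doubles the owned buffer (max(BPF_LOG_BUF_SIZE, size * 2)) until another
 * doubling would pass UINT_MAX / 2. Caller-provided buffers (prog->log_buf
 * or obj->log_buf) are used as-is and are never grown or freed here.
 */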
7485
7486static char *find_prev_line(char *buf, char *cur)
7487{
7488 char *p;
7489
7490	if (cur == buf) /* reached the start of the log buf, no previous line */
7491 return NULL;
7492
7493 p = cur - 1;
7494 while (p - 1 >= buf && *(p - 1) != '\n')
7495 p--;
7496
7497 return p;
7498}
7499
7500static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
7501 char *orig, size_t orig_sz, const char *patch)
7502{
7503	/* size of the remaining log content to the right of the to-be-replaced part */
7504 size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7505 size_t patch_sz = strlen(patch);
7506
7507 if (patch_sz != orig_sz) {
7508 /* If patch line(s) are longer than original piece of verifier log,
7509 * shift log contents by (patch_sz - orig_sz) bytes to the right
7510 * starting from after to-be-replaced part of the log.
7511 *
7512 * If patch line(s) are shorter than original piece of verifier log,
7513 * shift log contents by (orig_sz - patch_sz) bytes to the left
7514 * starting from after to-be-replaced part of the log
7515 *
7516		 * We need to be careful not to overflow the available
7517		 * buf_sz capacity. If the patch would overflow it, we
7518		 * truncate the end of the original log as necessary.
7519		 */
7520 if (patch_sz > orig_sz) {
7521 if (orig + patch_sz >= buf + buf_sz) {
7522 /* patch is big enough to cover remaining space completely */
7523 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7524 rem_sz = 0;
7525 } else if (patch_sz - orig_sz > buf_sz - log_sz) {
7526 /* patch causes part of remaining log to be truncated */
7527 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7528 }
7529 }
7530 /* shift remaining log to the right by calculated amount */
7531 memmove(orig + patch_sz, orig + orig_sz, rem_sz);
7532 }
7533
7534 memcpy(orig, patch, patch_sz);
7535}
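
/* Worked example for patch_log() (values are illustrative): with
 * buf_sz = 32, log_sz = 20, a to-be-replaced piece at orig = buf + 5 of
 * orig_sz = 4, and a patch of patch_sz = 10, rem_sz starts out as
 * (buf + 20) - (buf + 9) = 11. Since patch_sz - orig_sz = 6 does not exceed
 * buf_sz - log_sz = 12, nothing is truncated: the 11 remaining bytes are
 * shifted right by 6 via memmove(), then the 10-byte patch is copied into
 * the gap. Had the patch not fit, rem_sz (or patch_sz itself) would have
 * been trimmed first, truncating the tail of the log.
 */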
7536
7537static void fixup_log_failed_core_relo(struct bpf_program *prog,
7538 char *buf, size_t buf_sz, size_t log_sz,
7539 char *line1, char *line2, char *line3)
7540{
7541 /* Expected log for failed and not properly guarded CO-RE relocation:
7542 * line1 -> 123: (85) call unknown#195896080
7543 * line2 -> invalid func unknown#195896080
7544 * line3 -> <anything else or end of buffer>
7545 *
7546 * "123" is the index of the instruction that was poisoned. We extract
7547 * instruction index to find corresponding CO-RE relocation and
7548 * replace this part of the log with more relevant information about
7549 * failed CO-RE relocation.
7550 */
7551 const struct bpf_core_relo *relo;
7552 struct bpf_core_spec spec;
7553 char patch[512], spec_buf[256];
7554 int insn_idx, err, spec_len;
7555
7556 if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
7557 return;
7558
7559 relo = find_relo_core(prog, insn_idx);
7560 if (!relo)
7561 return;
7562
7563 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7564 if (err)
7565 return;
7566
7567 spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
7568 snprintf(patch, sizeof(patch),
7569 "%d: <invalid CO-RE relocation>\n"
7570 "failed to resolve CO-RE relocation %s%s\n",
7571 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
7572
7573 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7574}
7575
7576static void fixup_log_missing_map_load(struct bpf_program *prog,
7577 char *buf, size_t buf_sz, size_t log_sz,
7578 char *line1, char *line2, char *line3)
7579{
7580 /* Expected log for failed and not properly guarded map reference:
7581 * line1 -> 123: (85) call unknown#2001000345
7582 * line2 -> invalid func unknown#2001000345
7583 * line3 -> <anything else or end of buffer>
7584 *
7585 * "123" is the index of the instruction that was poisoned.
7586 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
7587 */
7588 struct bpf_object *obj = prog->obj;
7589 const struct bpf_map *map;
7590 int insn_idx, map_idx;
7591 char patch[128];
7592
7593 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7594 return;
7595
7596 map_idx -= POISON_LDIMM64_MAP_BASE;
7597 if (map_idx < 0 || map_idx >= obj->nr_maps)
7598 return;
7599 map = &obj->maps[map_idx];
7600
7601 snprintf(patch, sizeof(patch),
7602 "%d: <invalid BPF map reference>\n"
7603 "BPF map '%s' is referenced but wasn't created\n",
7604 insn_idx, map->name);
7605
7606 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7607}
7608
7609static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
7610 char *buf, size_t buf_sz, size_t log_sz,
7611 char *line1, char *line2, char *line3)
7612{
7613 /* Expected log for failed and not properly guarded kfunc call:
7614 * line1 -> 123: (85) call unknown#2002000345
7615 * line2 -> invalid func unknown#2002000345
7616 * line3 -> <anything else or end of buffer>
7617 *
7618 * "123" is the index of the instruction that was poisoned.
7619 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7620 */
7621 struct bpf_object *obj = prog->obj;
7622 const struct extern_desc *ext;
7623 int insn_idx, ext_idx;
7624 char patch[128];
7625
7626 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
7627 return;
7628
7629 ext_idx -= POISON_CALL_KFUNC_BASE;
7630 if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7631 return;
7632 ext = &obj->externs[ext_idx];
7633
7634 snprintf(patch, sizeof(patch),
7635 "%d: <invalid kfunc call>\n"
7636 "kfunc '%s' is referenced but wasn't resolved\n",
7637 insn_idx, ext->name);
7638
7639 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7640}
7641
7642static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7643{
7644 /* look for familiar error patterns in last N lines of the log */
7645 const size_t max_last_line_cnt = 10;
7646 char *prev_line, *cur_line, *next_line;
7647 size_t log_sz;
7648 int i;
7649
7650 if (!buf)
7651 return;
7652
7653 log_sz = strlen(buf) + 1;
7654 next_line = buf + log_sz - 1;
7655
7656 for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7657 cur_line = find_prev_line(buf, next_line);
7658 if (!cur_line)
7659 return;
7660
7661 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7662 prev_line = find_prev_line(buf, cur_line);
7663 if (!prev_line)
7664 continue;
7665
7666 /* failed CO-RE relocation case */
7667 fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7668 prev_line, cur_line, next_line);
7669 return;
7670 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
7671 prev_line = find_prev_line(buf, cur_line);
7672 if (!prev_line)
7673 continue;
7674
7675 /* reference to uncreated BPF map */
7676 fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7677 prev_line, cur_line, next_line);
7678 return;
7679 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
7680 prev_line = find_prev_line(buf, cur_line);
7681 if (!prev_line)
7682 continue;
7683
7684 /* reference to unresolved kfunc */
7685 fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
7686 prev_line, cur_line, next_line);
7687 return;
7688 }
7689 }
7690}
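
/* Example of the net effect (log contents are illustrative): a raw verifier
 * log tail like
 *
 *	123: (85) call unknown#195896080
 *	invalid func unknown#195896080
 *	processed 42 insns ...
 *
 * is rewritten in place into something like
 *
 *	123: <invalid CO-RE relocation>
 *	failed to resolve CO-RE relocation <byte_off> [7] struct task_struct...
 *	processed 42 insns ...
 *
 * so users see which CO-RE relocation (or map/kfunc reference) was poisoned
 * instead of a cryptic "invalid func" complaint.
 */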
7691
7692static int bpf_program_record_relos(struct bpf_program *prog)
7693{
7694 struct bpf_object *obj = prog->obj;
7695 int i;
7696
7697 for (i = 0; i < prog->nr_reloc; i++) {
7698 struct reloc_desc *relo = &prog->reloc_desc[i];
7699 struct extern_desc *ext = &obj->externs[relo->ext_idx];
7700 int kind;
7701
7702 switch (relo->type) {
7703 case RELO_EXTERN_LD64:
7704 if (ext->type != EXT_KSYM)
7705 continue;
7706 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7707 BTF_KIND_VAR : BTF_KIND_FUNC;
7708 bpf_gen__record_extern(obj->gen_loader, ext->name,
7709 ext->is_weak, !ext->ksym.type_id,
7710 true, kind, relo->insn_idx);
7711 break;
7712 case RELO_EXTERN_CALL:
7713 bpf_gen__record_extern(obj->gen_loader, ext->name,
7714 ext->is_weak, false, false, BTF_KIND_FUNC,
7715 relo->insn_idx);
7716 break;
7717 case RELO_CORE: {
7718 struct bpf_core_relo cr = {
7719 .insn_off = relo->insn_idx * 8,
7720 .type_id = relo->core_relo->type_id,
7721 .access_str_off = relo->core_relo->access_str_off,
7722 .kind = relo->core_relo->kind,
7723 };
7724
7725 bpf_gen__record_relo_core(obj->gen_loader, &cr);
7726 break;
7727 }
7728 default:
7729 continue;
7730 }
7731 }
7732 return 0;
7733}
7734
7735static int
7736bpf_object__load_progs(struct bpf_object *obj, int log_level)
7737{
7738 struct bpf_program *prog;
7739 size_t i;
7740 int err;
7741
7742 for (i = 0; i < obj->nr_programs; i++) {
7743 prog = &obj->programs[i];
7744 err = bpf_object__sanitize_prog(obj, prog);
7745 if (err)
7746 return err;
7747 }
7748
7749 for (i = 0; i < obj->nr_programs; i++) {
7750 prog = &obj->programs[i];
7751 if (prog_is_subprog(obj, prog))
7752 continue;
7753 if (!prog->autoload) {
7754 pr_debug("prog '%s': skipped loading\n", prog->name);
7755 continue;
7756 }
7757 prog->log_level |= log_level;
7758
7759 if (obj->gen_loader)
7760 bpf_program_record_relos(prog);
7761
7762 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7763 obj->license, obj->kern_version, &prog->fd);
7764 if (err) {
7765 pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
7766 return err;
7767 }
7768 }
7769
7770 bpf_object__free_relocs(obj);
7771 return 0;
7772}
7773
7774static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7775
7776static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7777{
7778 struct bpf_program *prog;
7779 int err;
7780
7781 bpf_object__for_each_program(prog, obj) {
7782 prog->sec_def = find_sec_def(prog->sec_name);
7783 if (!prog->sec_def) {
7784 /* couldn't guess, but user might manually specify */
7785 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7786 prog->name, prog->sec_name);
7787 continue;
7788 }
7789
7790 prog->type = prog->sec_def->prog_type;
7791 prog->expected_attach_type = prog->sec_def->expected_attach_type;
7792
7793 /* sec_def can have custom callback which should be called
7794 * after bpf_program is initialized to adjust its properties
7795 */
7796 if (prog->sec_def->prog_setup_fn) {
7797 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
7798 if (err < 0) {
7799 pr_warn("prog '%s': failed to initialize: %d\n",
7800 prog->name, err);
7801 return err;
7802 }
7803 }
7804 }
7805
7806 return 0;
7807}
7808
7809static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7810 const struct bpf_object_open_opts *opts)
7811{
7812 const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
7813 struct bpf_object *obj;
7814 char tmp_name[64];
7815 int err;
7816 char *log_buf;
7817 size_t log_size;
7818 __u32 log_level;
7819
7820 if (elf_version(EV_CURRENT) == EV_NONE) {
7821 pr_warn("failed to init libelf for %s\n",
7822 path ? : "(mem buf)");
7823 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7824 }
7825
7826 if (!OPTS_VALID(opts, bpf_object_open_opts))
7827 return ERR_PTR(-EINVAL);
7828
7829 obj_name = OPTS_GET(opts, object_name, NULL);
7830 if (obj_buf) {
7831 if (!obj_name) {
7832 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7833 (unsigned long)obj_buf,
7834 (unsigned long)obj_buf_sz);
7835 obj_name = tmp_name;
7836 }
7837 path = obj_name;
7838 pr_debug("loading object '%s' from buffer\n", obj_name);
7839 }
7840
7841 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7842 log_size = OPTS_GET(opts, kernel_log_size, 0);
7843 log_level = OPTS_GET(opts, kernel_log_level, 0);
7844 if (log_size > UINT_MAX)
7845 return ERR_PTR(-EINVAL);
7846 if (log_size && !log_buf)
7847 return ERR_PTR(-EINVAL);
7848
7849 token_path = OPTS_GET(opts, bpf_token_path, NULL);
7850 /* if user didn't specify bpf_token_path explicitly, check if
7851 * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
7852 * option
7853 */
7854 if (!token_path)
7855 token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
7856 if (token_path && strlen(token_path) >= PATH_MAX)
7857 return ERR_PTR(-ENAMETOOLONG);
7858
7859 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7860 if (IS_ERR(obj))
7861 return obj;
7862
7863 obj->log_buf = log_buf;
7864 obj->log_size = log_size;
7865 obj->log_level = log_level;
7866
7867 if (token_path) {
7868 obj->token_path = strdup(token_path);
7869 if (!obj->token_path) {
7870 err = -ENOMEM;
7871 goto out;
7872 }
7873 }
7874
7875 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7876 if (btf_tmp_path) {
7877 if (strlen(btf_tmp_path) >= PATH_MAX) {
7878 err = -ENAMETOOLONG;
7879 goto out;
7880 }
7881 obj->btf_custom_path = strdup(btf_tmp_path);
7882 if (!obj->btf_custom_path) {
7883 err = -ENOMEM;
7884 goto out;
7885 }
7886 }
7887
7888 kconfig = OPTS_GET(opts, kconfig, NULL);
7889 if (kconfig) {
7890 obj->kconfig = strdup(kconfig);
7891 if (!obj->kconfig) {
7892 err = -ENOMEM;
7893 goto out;
7894 }
7895 }
7896
7897 err = bpf_object__elf_init(obj);
7898 err = err ? : bpf_object__check_endianness(obj);
7899 err = err ? : bpf_object__elf_collect(obj);
7900 err = err ? : bpf_object__collect_externs(obj);
7901 err = err ? : bpf_object_fixup_btf(obj);
7902 err = err ? : bpf_object__init_maps(obj, opts);
7903 err = err ? : bpf_object_init_progs(obj, opts);
7904 err = err ? : bpf_object__collect_relos(obj);
7905 if (err)
7906 goto out;
7907
7908 bpf_object__elf_finish(obj);
7909
7910 return obj;
7911out:
7912 bpf_object__close(obj);
7913 return ERR_PTR(err);
7914}
7915
7916struct bpf_object *
7917bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7918{
7919 if (!path)
7920 return libbpf_err_ptr(-EINVAL);
7921
7922 pr_debug("loading %s\n", path);
7923
7924 return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
7925}
7926
7927struct bpf_object *bpf_object__open(const char *path)
7928{
7929 return bpf_object__open_file(path, NULL);
7930}
7931
7932struct bpf_object *
7933bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7934 const struct bpf_object_open_opts *opts)
7935{
7936 if (!obj_buf || obj_buf_sz == 0)
7937 return libbpf_err_ptr(-EINVAL);
7938
7939 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
7940}
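
/* Typical open-phase usage (sketch, not part of libbpf): either variant can
 * take bpf_object_open_opts; the file and object names are hypothetical:
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *		.kernel_log_level = 1,
 *	);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (!obj)
 *		return -errno;
 */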
7941
7942static int bpf_object_unload(struct bpf_object *obj)
7943{
7944 size_t i;
7945
7946 if (!obj)
7947 return libbpf_err(-EINVAL);
7948
7949 for (i = 0; i < obj->nr_maps; i++) {
7950 zclose(obj->maps[i].fd);
7951 if (obj->maps[i].st_ops)
7952 zfree(&obj->maps[i].st_ops->kern_vdata);
7953 }
7954
7955 for (i = 0; i < obj->nr_programs; i++)
7956 bpf_program__unload(&obj->programs[i]);
7957
7958 return 0;
7959}
7960
7961static int bpf_object__sanitize_maps(struct bpf_object *obj)
7962{
7963 struct bpf_map *m;
7964
7965 bpf_object__for_each_map(m, obj) {
7966 if (!bpf_map__is_internal(m))
7967 continue;
7968 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
7969 m->def.map_flags &= ~BPF_F_MMAPABLE;
7970 }
7971
7972 return 0;
7973}
7974
7975int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
7976{
7977 char sym_type, sym_name[500];
7978 unsigned long long sym_addr;
7979 int ret, err = 0;
7980 FILE *f;
7981
7982 f = fopen("/proc/kallsyms", "re");
7983 if (!f) {
7984 err = -errno;
7985 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7986 return err;
7987 }
7988
7989 while (true) {
7990 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7991 &sym_addr, &sym_type, sym_name);
7992 if (ret == EOF && feof(f))
7993 break;
7994 if (ret != 3) {
7995 pr_warn("failed to read kallsyms entry: %d\n", ret);
7996 err = -EINVAL;
7997 break;
7998 }
7999
8000 err = cb(sym_addr, sym_type, sym_name, ctx);
8001 if (err)
8002 break;
8003 }
8004
8005 fclose(f);
8006 return err;
8007}
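
/* Sketch of a custom kallsyms_cb_t consumer (illustrative only; struct
 * ksym_query is a hypothetical type): resolve a single symbol's address,
 * stopping the scan early by returning a non-zero value, which
 * libbpf_kallsyms_parse() then propagates back to the caller:
 *
 *	struct ksym_query { const char *name; unsigned long long addr; };
 *
 *	static int find_sym_cb(unsigned long long sym_addr, char sym_type,
 *			       const char *sym_name, void *ctx)
 *	{
 *		struct ksym_query *q = ctx;
 *
 *		if (strcmp(sym_name, q->name) != 0)
 *			return 0;	// keep scanning
 *		q->addr = sym_addr;
 *		return 1;		// non-zero stops iteration
 *	}
 */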
8008
8009static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
8010 const char *sym_name, void *ctx)
8011{
8012 struct bpf_object *obj = ctx;
8013 const struct btf_type *t;
8014 struct extern_desc *ext;
8015
8016 ext = find_extern_by_name(obj, sym_name);
8017 if (!ext || ext->type != EXT_KSYM)
8018 return 0;
8019
8020 t = btf__type_by_id(obj->btf, ext->btf_id);
8021 if (!btf_is_var(t))
8022 return 0;
8023
8024 if (ext->is_set && ext->ksym.addr != sym_addr) {
8025 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
8026 sym_name, ext->ksym.addr, sym_addr);
8027 return -EINVAL;
8028 }
8029 if (!ext->is_set) {
8030 ext->is_set = true;
8031 ext->ksym.addr = sym_addr;
8032 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
8033 }
8034 return 0;
8035}
8036
8037static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
8038{
8039 return libbpf_kallsyms_parse(kallsyms_cb, obj);
8040}
8041
8042static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
8043 __u16 kind, struct btf **res_btf,
8044 struct module_btf **res_mod_btf)
8045{
8046 struct module_btf *mod_btf;
8047 struct btf *btf;
8048 int i, id, err;
8049
8050 btf = obj->btf_vmlinux;
8051 mod_btf = NULL;
8052 id = btf__find_by_name_kind(btf, ksym_name, kind);
8053
8054 if (id == -ENOENT) {
8055 err = load_module_btfs(obj);
8056 if (err)
8057 return err;
8058
8059 for (i = 0; i < obj->btf_module_cnt; i++) {
8060 /* we assume module_btf's BTF FD is always >0 */
8061 mod_btf = &obj->btf_modules[i];
8062 btf = mod_btf->btf;
8063 id = btf__find_by_name_kind_own(btf, ksym_name, kind);
8064 if (id != -ENOENT)
8065 break;
8066 }
8067 }
8068 if (id <= 0)
8069 return -ESRCH;
8070
8071 *res_btf = btf;
8072 *res_mod_btf = mod_btf;
8073 return id;
8074}
8075
8076static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
8077 struct extern_desc *ext)
8078{
8079 const struct btf_type *targ_var, *targ_type;
8080 __u32 targ_type_id, local_type_id;
8081 struct module_btf *mod_btf = NULL;
8082 const char *targ_var_name;
8083 struct btf *btf = NULL;
8084 int id, err;
8085
8086 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
8087 if (id < 0) {
8088 if (id == -ESRCH && ext->is_weak)
8089 return 0;
8090 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
8091 ext->name);
8092 return id;
8093 }
8094
8095 /* find local type_id */
8096 local_type_id = ext->ksym.type_id;
8097
8098 /* find target type_id */
8099 targ_var = btf__type_by_id(btf, id);
8100 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8101 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
8102
8103 err = bpf_core_types_are_compat(obj->btf, local_type_id,
8104 btf, targ_type_id);
8105 if (err <= 0) {
8106 const struct btf_type *local_type;
8107 const char *targ_name, *local_name;
8108
8109 local_type = btf__type_by_id(obj->btf, local_type_id);
8110 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8111 targ_name = btf__name_by_offset(btf, targ_type->name_off);
8112
8113 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
8114 ext->name, local_type_id,
8115 btf_kind_str(local_type), local_name, targ_type_id,
8116 btf_kind_str(targ_type), targ_name);
8117 return -EINVAL;
8118 }
8119
8120 ext->is_set = true;
8121 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8122 ext->ksym.kernel_btf_id = id;
8123 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
8124 ext->name, id, btf_kind_str(targ_var), targ_var_name);
8125
8126 return 0;
8127}
8128
8129static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8130 struct extern_desc *ext)
8131{
8132 int local_func_proto_id, kfunc_proto_id, kfunc_id;
8133 struct module_btf *mod_btf = NULL;
8134 const struct btf_type *kern_func;
8135 struct btf *kern_btf = NULL;
8136 int ret;
8137
8138 local_func_proto_id = ext->ksym.type_id;
8139
8140 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8141 &mod_btf);
8142 if (kfunc_id < 0) {
8143 if (kfunc_id == -ESRCH && ext->is_weak)
8144 return 0;
8145 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
8146 ext->name);
8147 return kfunc_id;
8148 }
8149
8150 kern_func = btf__type_by_id(kern_btf, kfunc_id);
8151 kfunc_proto_id = kern_func->type;
8152
8153 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8154 kern_btf, kfunc_proto_id);
8155 if (ret <= 0) {
8156 if (ext->is_weak)
8157 return 0;
8158
8159 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
8160 ext->name, local_func_proto_id,
8161 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
8162 return -EINVAL;
8163 }
8164
8165 /* set index for module BTF fd in fd_array, if unset */
8166 if (mod_btf && !mod_btf->fd_array_idx) {
8167 /* insn->off is s16 */
8168 if (obj->fd_array_cnt == INT16_MAX) {
8169 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
8170 ext->name, mod_btf->fd_array_idx);
8171 return -E2BIG;
8172 }
8173 /* Cannot use index 0 for module BTF fd */
8174 if (!obj->fd_array_cnt)
8175 obj->fd_array_cnt = 1;
8176
8177 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8178 obj->fd_array_cnt + 1);
8179 if (ret)
8180 return ret;
8181 mod_btf->fd_array_idx = obj->fd_array_cnt;
8182 /* we assume module BTF FD is always >0 */
8183 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8184 }
8185
8186 ext->is_set = true;
8187 ext->ksym.kernel_btf_id = kfunc_id;
8188 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
8189 /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
8190 * populates FD into ld_imm64 insn when it's used to point to kfunc.
8191 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8192 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
8193 */
8194 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8195 pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
8196 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
8197
8198 return 0;
8199}
8200
8201static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
8202{
8203 const struct btf_type *t;
8204 struct extern_desc *ext;
8205 int i, err;
8206
8207 for (i = 0; i < obj->nr_extern; i++) {
8208 ext = &obj->externs[i];
8209 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
8210 continue;
8211
8212 if (obj->gen_loader) {
8213 ext->is_set = true;
8214 ext->ksym.kernel_btf_obj_fd = 0;
8215 ext->ksym.kernel_btf_id = 0;
8216 continue;
8217 }
8218 t = btf__type_by_id(obj->btf, ext->btf_id);
8219 if (btf_is_var(t))
8220 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
8221 else
8222 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
8223 if (err)
8224 return err;
8225 }
8226 return 0;
8227}
8228
8229static int bpf_object__resolve_externs(struct bpf_object *obj,
8230 const char *extra_kconfig)
8231{
8232 bool need_config = false, need_kallsyms = false;
8233 bool need_vmlinux_btf = false;
8234 struct extern_desc *ext;
8235 void *kcfg_data = NULL;
8236 int err, i;
8237
8238 if (obj->nr_extern == 0)
8239 return 0;
8240
8241 if (obj->kconfig_map_idx >= 0)
8242 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
8243
8244 for (i = 0; i < obj->nr_extern; i++) {
8245 ext = &obj->externs[i];
8246
8247 if (ext->type == EXT_KSYM) {
8248 if (ext->ksym.type_id)
8249 need_vmlinux_btf = true;
8250 else
8251 need_kallsyms = true;
8252 continue;
8253 } else if (ext->type == EXT_KCFG) {
8254 void *ext_ptr = kcfg_data + ext->kcfg.data_off;
8255 __u64 value = 0;
8256
8257 /* Kconfig externs need actual /proc/config.gz */
8258 if (str_has_pfx(ext->name, "CONFIG_")) {
8259 need_config = true;
8260 continue;
8261 }
8262
8263			/* Virtual kcfg externs are handled specially by libbpf */
8264 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
8265 value = get_kernel_version();
8266 if (!value) {
8267 pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
8268 return -EINVAL;
8269 }
8270 } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
8271 value = kernel_supports(obj, FEAT_BPF_COOKIE);
8272 } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
8273 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
8274 } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
8275				/* Currently libbpf supports only CONFIG_- and LINUX_-prefixed
8276				 * __kconfig externs, where LINUX_ ones are virtual and filled in
8277				 * by libbpf itself (their values don't come from Kconfig).
8278				 * If a LINUX_xxx variable is not recognized by libbpf but is
8279				 * marked __weak, it defaults to zero, just like CONFIG_xxx
8280				 * externs do.
8281				 */
8282 pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
8283 return -EINVAL;
8284 }
8285
8286 err = set_kcfg_value_num(ext, ext_ptr, value);
8287 if (err)
8288 return err;
8289 pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
8290 ext->name, (long long)value);
8291 } else {
8292 pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
8293 return -EINVAL;
8294 }
8295 }
8296 if (need_config && extra_kconfig) {
8297 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
8298 if (err)
8299 return -EINVAL;
8300 need_config = false;
8301 for (i = 0; i < obj->nr_extern; i++) {
8302 ext = &obj->externs[i];
8303 if (ext->type == EXT_KCFG && !ext->is_set) {
8304 need_config = true;
8305 break;
8306 }
8307 }
8308 }
8309 if (need_config) {
8310 err = bpf_object__read_kconfig_file(obj, kcfg_data);
8311 if (err)
8312 return -EINVAL;
8313 }
8314 if (need_kallsyms) {
8315 err = bpf_object__read_kallsyms_file(obj);
8316 if (err)
8317 return -EINVAL;
8318 }
8319 if (need_vmlinux_btf) {
8320 err = bpf_object__resolve_ksyms_btf_id(obj);
8321 if (err)
8322 return -EINVAL;
8323 }
8324 for (i = 0; i < obj->nr_extern; i++) {
8325 ext = &obj->externs[i];
8326
8327 if (!ext->is_set && !ext->is_weak) {
8328 pr_warn("extern '%s' (strong): not resolved\n", ext->name);
8329 return -ESRCH;
8330 } else if (!ext->is_set) {
8331 pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
8332 ext->name);
8333 }
8334 }
8335
8336 return 0;
8337}
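
/* BPF-side counterpart (sketch): the externs resolved above are declared in
 * BPF C roughly like this, following the conventional libbpf annotations:
 *
 *	extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 *	extern unsigned int CONFIG_HZ __kconfig;
 *	extern const void bpf_prog_active __ksym;		// address-only ksym
 *	extern void bpf_rcu_read_lock(void) __ksym __weak;	// typed kfunc
 *
 * CONFIG_* values come from Kconfig data, LINUX_* ones are synthesized by
 * libbpf, and __ksym externs resolve via kallsyms or vmlinux/module BTF.
 */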
8338
8339static void bpf_map_prepare_vdata(const struct bpf_map *map)
8340{
8341 struct bpf_struct_ops *st_ops;
8342 __u32 i;
8343
8344 st_ops = map->st_ops;
8345 for (i = 0; i < btf_vlen(st_ops->type); i++) {
8346 struct bpf_program *prog = st_ops->progs[i];
8347 void *kern_data;
8348 int prog_fd;
8349
8350 if (!prog)
8351 continue;
8352
8353 prog_fd = bpf_program__fd(prog);
8354 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
8355 *(unsigned long *)kern_data = prog_fd;
8356 }
8357}
8358
8359static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8360{
8361 struct bpf_map *map;
8362 int i;
8363
8364 for (i = 0; i < obj->nr_maps; i++) {
8365 map = &obj->maps[i];
8366
8367 if (!bpf_map__is_struct_ops(map))
8368 continue;
8369
8370 if (!map->autocreate)
8371 continue;
8372
8373 bpf_map_prepare_vdata(map);
8374 }
8375
8376 return 0;
8377}
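
/* BPF-side shape that feeds bpf_map_prepare_vdata() (sketch; this follows
 * the common congestion-ops pattern from selftests, names illustrative):
 *
 *	SEC("struct_ops/dctcp_init")
 *	void BPF_PROG(dctcp_init, struct sock *sk) { ... }
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops dctcp = {
 *		.init = (void *)dctcp_init,
 *		.name = "bpf_dctcp",
 *	};
 *
 * At load time each referenced program's FD is poked into the matching slot
 * of kern_vdata before the struct_ops map value is pushed to the kernel.
 */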
8378
8379static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
8380{
8381 int err, i;
8382
8383 if (!obj)
8384 return libbpf_err(-EINVAL);
8385
8386 if (obj->loaded) {
8387 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8388 return libbpf_err(-EINVAL);
8389 }
8390
8391 if (obj->gen_loader)
8392 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
8393
8394 err = bpf_object_prepare_token(obj);
8395 err = err ? : bpf_object__probe_loading(obj);
8396 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8397 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8398 err = err ? : bpf_object__sanitize_maps(obj);
8399 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
8400 err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
8401 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
8402 err = err ? : bpf_object__sanitize_and_load_btf(obj);
8403 err = err ? : bpf_object__create_maps(obj);
8404 err = err ? : bpf_object__load_progs(obj, extra_log_level);
8405 err = err ? : bpf_object_init_prog_arrays(obj);
8406 err = err ? : bpf_object_prepare_struct_ops(obj);
8407
8408 if (obj->gen_loader) {
8409 /* reset FDs */
8410 if (obj->btf)
8411 btf__set_fd(obj->btf, -1);
8412 if (!err)
8413 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
8414 }
8415
8416 /* clean up fd_array */
8417 zfree(&obj->fd_array);
8418
8419 /* clean up module BTFs */
8420 for (i = 0; i < obj->btf_module_cnt; i++) {
8421 close(obj->btf_modules[i].fd);
8422 btf__free(obj->btf_modules[i].btf);
8423 free(obj->btf_modules[i].name);
8424 }
8425 free(obj->btf_modules);
8426
8427 /* clean up vmlinux BTF */
8428 btf__free(obj->btf_vmlinux);
8429 obj->btf_vmlinux = NULL;
8430
8431 obj->loaded = true; /* doesn't matter if successfully or not */
8432
8433 if (err)
8434 goto out;
8435
8436 return 0;
8437out:
8438 /* unpin any maps that were auto-pinned during load */
8439 for (i = 0; i < obj->nr_maps; i++)
8440 if (obj->maps[i].pinned && !obj->maps[i].reused)
8441 bpf_map__unpin(&obj->maps[i], NULL);
8442
8443 bpf_object_unload(obj);
8444 pr_warn("failed to load object '%s'\n", obj->path);
8445 return libbpf_err(err);
8446}
8447
8448int bpf_object__load(struct bpf_object *obj)
8449{
8450 return bpf_object_load(obj, 0, NULL);
8451}
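
/* End-to-end sketch of the load phase from an application (illustrative;
 * the object file name is hypothetical):
 *
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	int err;
 *
 *	if (!obj)
 *		return -errno;
 *	err = bpf_object__load(obj);
 *	if (err) {
 *		bpf_object__close(obj);
 *		return err;		// already a negative errno
 *	}
 */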
8452
8453static int make_parent_dir(const char *path)
8454{
8455 char *cp, errmsg[STRERR_BUFSIZE];
8456 char *dname, *dir;
8457 int err = 0;
8458
8459 dname = strdup(path);
8460 if (dname == NULL)
8461 return -ENOMEM;
8462
8463 dir = dirname(dname);
8464 if (mkdir(dir, 0700) && errno != EEXIST)
8465 err = -errno;
8466
8467 free(dname);
8468 if (err) {
8469 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8470 pr_warn("failed to mkdir %s: %s\n", path, cp);
8471 }
8472 return err;
8473}
8474
8475static int check_path(const char *path)
8476{
8477 char *cp, errmsg[STRERR_BUFSIZE];
8478 struct statfs st_fs;
8479 char *dname, *dir;
8480 int err = 0;
8481
8482 if (path == NULL)
8483 return -EINVAL;
8484
8485 dname = strdup(path);
8486 if (dname == NULL)
8487 return -ENOMEM;
8488
8489 dir = dirname(dname);
8490 if (statfs(dir, &st_fs)) {
8491 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
8492 pr_warn("failed to statfs %s: %s\n", dir, cp);
8493 err = -errno;
8494 }
8495 free(dname);
8496
8497 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
8498 pr_warn("specified path %s is not on BPF FS\n", path);
8499 err = -EINVAL;
8500 }
8501
8502 return err;
8503}
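
/* check_path() requires the pin location to be on bpffs, so callers must
 * have one mounted; a minimal sketch in C (equivalent to the usual
 * "mount -t bpf bpffs /sys/fs/bpf" shell step):
 *
 *	#include <sys/mount.h>
 *
 *	if (mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL) && errno != EBUSY)
 *		return -errno;
 */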
8504
8505int bpf_program__pin(struct bpf_program *prog, const char *path)
8506{
8507 char *cp, errmsg[STRERR_BUFSIZE];
8508 int err;
8509
8510 if (prog->fd < 0) {
8511 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
8512 return libbpf_err(-EINVAL);
8513 }
8514
8515 err = make_parent_dir(path);
8516 if (err)
8517 return libbpf_err(err);
8518
8519 err = check_path(path);
8520 if (err)
8521 return libbpf_err(err);
8522
8523 if (bpf_obj_pin(prog->fd, path)) {
8524 err = -errno;
8525 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
8526 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
8527 return libbpf_err(err);
8528 }
8529
8530 pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
8531 return 0;
8532}
8533
8534int bpf_program__unpin(struct bpf_program *prog, const char *path)
8535{
8536 int err;
8537
8538 if (prog->fd < 0) {
8539 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
8540 return libbpf_err(-EINVAL);
8541 }
8542
8543 err = check_path(path);
8544 if (err)
8545 return libbpf_err(err);
8546
8547 err = unlink(path);
8548 if (err)
8549 return libbpf_err(-errno);
8550
8551 pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
8552 return 0;
8553}
8554
8555int bpf_map__pin(struct bpf_map *map, const char *path)
8556{
8557 char *cp, errmsg[STRERR_BUFSIZE];
8558 int err;
8559
8560 if (map == NULL) {
8561 pr_warn("invalid map pointer\n");
8562 return libbpf_err(-EINVAL);
8563 }
8564
8565 if (map->pin_path) {
8566 if (path && strcmp(path, map->pin_path)) {
8567 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8568 bpf_map__name(map), map->pin_path, path);
8569 return libbpf_err(-EINVAL);
8570 } else if (map->pinned) {
8571 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8572 bpf_map__name(map), map->pin_path);
8573 return 0;
8574 }
8575 } else {
8576 if (!path) {
8577 pr_warn("missing a path to pin map '%s' at\n",
8578 bpf_map__name(map));
8579 return libbpf_err(-EINVAL);
8580 } else if (map->pinned) {
8581 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
8582 return libbpf_err(-EEXIST);
8583 }
8584
8585 map->pin_path = strdup(path);
8586 if (!map->pin_path) {
8587 err = -errno;
8588 goto out_err;
8589 }
8590 }
8591
8592 err = make_parent_dir(map->pin_path);
8593 if (err)
8594 return libbpf_err(err);
8595
8596 err = check_path(map->pin_path);
8597 if (err)
8598 return libbpf_err(err);
8599
8600 if (bpf_obj_pin(map->fd, map->pin_path)) {
8601 err = -errno;
8602 goto out_err;
8603 }
8604
8605 map->pinned = true;
8606 pr_debug("pinned map '%s'\n", map->pin_path);
8607
8608 return 0;
8609
8610out_err:
8611 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8612 pr_warn("failed to pin map: %s\n", cp);
8613 return libbpf_err(err);
8614}
8615
8616int bpf_map__unpin(struct bpf_map *map, const char *path)
8617{
8618 int err;
8619
8620 if (map == NULL) {
8621 pr_warn("invalid map pointer\n");
8622 return libbpf_err(-EINVAL);
8623 }
8624
8625 if (map->pin_path) {
8626 if (path && strcmp(path, map->pin_path)) {
8627 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8628 bpf_map__name(map), map->pin_path, path);
8629 return libbpf_err(-EINVAL);
8630 }
8631 path = map->pin_path;
8632 } else if (!path) {
8633 pr_warn("no path to unpin map '%s' from\n",
8634 bpf_map__name(map));
8635 return libbpf_err(-EINVAL);
8636 }
8637
8638 err = check_path(path);
8639 if (err)
8640 return libbpf_err(err);
8641
8642 err = unlink(path);
8643 if (err != 0)
8644 return libbpf_err(-errno);
8645
8646 map->pinned = false;
8647 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
8648
8649 return 0;
8650}
8651
8652int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8653{
8654 char *new = NULL;
8655
8656 if (path) {
8657 new = strdup(path);
8658 if (!new)
8659 return libbpf_err(-errno);
8660 }
8661
8662 free(map->pin_path);
8663 map->pin_path = new;
8664 return 0;
8665}
8666
8667__alias(bpf_map__pin_path)
8668const char *bpf_map__get_pin_path(const struct bpf_map *map);
8669
8670const char *bpf_map__pin_path(const struct bpf_map *map)
8671{
8672 return map->pin_path;
8673}
8674
8675bool bpf_map__is_pinned(const struct bpf_map *map)
8676{
8677 return map->pinned;
8678}
8679
8680static void sanitize_pin_path(char *s)
8681{
8682 /* bpffs disallows periods in path names */
8683 while (*s) {
8684 if (*s == '.')
8685 *s = '_';
8686 s++;
8687 }
8688}
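
/* Worked example (names illustrative): for path "/sys/fs/bpf" and a map
 * named "test_ob.rodata", pathname_concat() below yields
 * "/sys/fs/bpf/test_ob.rodata" and sanitize_pin_path() rewrites it to
 * "/sys/fs/bpf/test_ob_rodata", since bpffs rejects '.' in names.
 */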
8689
8690int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8691{
8692 struct bpf_map *map;
8693 int err;
8694
8695 if (!obj)
8696 return libbpf_err(-ENOENT);
8697
8698 if (!obj->loaded) {
8699 pr_warn("object not yet loaded; load it first\n");
8700 return libbpf_err(-ENOENT);
8701 }
8702
8703 bpf_object__for_each_map(map, obj) {
8704 char *pin_path = NULL;
8705 char buf[PATH_MAX];
8706
8707 if (!map->autocreate)
8708 continue;
8709
8710 if (path) {
8711 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8712 if (err)
8713 goto err_unpin_maps;
8714 sanitize_pin_path(buf);
8715 pin_path = buf;
8716 } else if (!map->pin_path) {
8717 continue;
8718 }
8719
8720 err = bpf_map__pin(map, pin_path);
8721 if (err)
8722 goto err_unpin_maps;
8723 }
8724
8725 return 0;
8726
8727err_unpin_maps:
8728 while ((map = bpf_object__prev_map(obj, map))) {
8729 if (!map->pin_path)
8730 continue;
8731
8732 bpf_map__unpin(map, NULL);
8733 }
8734
8735 return libbpf_err(err);
8736}
8737
8738int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8739{
8740 struct bpf_map *map;
8741 int err;
8742
8743 if (!obj)
8744 return libbpf_err(-ENOENT);
8745
8746 bpf_object__for_each_map(map, obj) {
8747 char *pin_path = NULL;
8748 char buf[PATH_MAX];
8749
8750 if (path) {
8751 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8752 if (err)
8753 return libbpf_err(err);
8754 sanitize_pin_path(buf);
8755 pin_path = buf;
8756 } else if (!map->pin_path) {
8757 continue;
8758 }
8759
8760 err = bpf_map__unpin(map, pin_path);
8761 if (err)
8762 return libbpf_err(err);
8763 }
8764
8765 return 0;
8766}
8767
8768int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8769{
8770 struct bpf_program *prog;
8771 char buf[PATH_MAX];
8772 int err;
8773
8774 if (!obj)
8775 return libbpf_err(-ENOENT);
8776
8777 if (!obj->loaded) {
8778 pr_warn("object not yet loaded; load it first\n");
8779 return libbpf_err(-ENOENT);
8780 }
8781
8782 bpf_object__for_each_program(prog, obj) {
8783 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8784 if (err)
8785 goto err_unpin_programs;
8786
8787 err = bpf_program__pin(prog, buf);
8788 if (err)
8789 goto err_unpin_programs;
8790 }
8791
8792 return 0;
8793
8794err_unpin_programs:
8795 while ((prog = bpf_object__prev_program(obj, prog))) {
8796 if (pathname_concat(buf, sizeof(buf), path, prog->name))
8797 continue;
8798
8799 bpf_program__unpin(prog, buf);
8800 }
8801
8802 return libbpf_err(err);
8803}
8804
8805int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8806{
8807 struct bpf_program *prog;
8808 int err;
8809
8810 if (!obj)
8811 return libbpf_err(-ENOENT);
8812
8813 bpf_object__for_each_program(prog, obj) {
8814 char buf[PATH_MAX];
8815
8816 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8817 if (err)
8818 return libbpf_err(err);
8819
8820 err = bpf_program__unpin(prog, buf);
8821 if (err)
8822 return libbpf_err(err);
8823 }
8824
8825 return 0;
8826}
8827
8828int bpf_object__pin(struct bpf_object *obj, const char *path)
8829{
8830 int err;
8831
8832 err = bpf_object__pin_maps(obj, path);
8833 if (err)
8834 return libbpf_err(err);
8835
8836 err = bpf_object__pin_programs(obj, path);
8837 if (err) {
8838 bpf_object__unpin_maps(obj, path);
8839 return libbpf_err(err);
8840 }
8841
8842 return 0;
8843}
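
/* Usage sketch (illustrative): pin everything under one bpffs directory and
 * tear it down on exit; the directory name is hypothetical:
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/my_app");
 *	if (err)
 *		goto cleanup;
 *	...
 *	bpf_object__unpin(obj, "/sys/fs/bpf/my_app");
 */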
8844
8845int bpf_object__unpin(struct bpf_object *obj, const char *path)
8846{
8847 int err;
8848
8849 err = bpf_object__unpin_programs(obj, path);
8850 if (err)
8851 return libbpf_err(err);
8852
8853 err = bpf_object__unpin_maps(obj, path);
8854 if (err)
8855 return libbpf_err(err);
8856
8857 return 0;
8858}
8859
8860static void bpf_map__destroy(struct bpf_map *map)
8861{
8862 if (map->inner_map) {
8863 bpf_map__destroy(map->inner_map);
8864 zfree(&map->inner_map);
8865 }
8866
8867 zfree(&map->init_slots);
8868 map->init_slots_sz = 0;
8869
8870 if (map->mmaped && map->mmaped != map->obj->arena_data)
8871 munmap(map->mmaped, bpf_map_mmap_sz(map));
8872 map->mmaped = NULL;
8873
8874 if (map->st_ops) {
8875 zfree(&map->st_ops->data);
8876 zfree(&map->st_ops->progs);
8877 zfree(&map->st_ops->kern_func_off);
8878 zfree(&map->st_ops);
8879 }
8880
8881 zfree(&map->name);
8882 zfree(&map->real_name);
8883 zfree(&map->pin_path);
8884
8885 if (map->fd >= 0)
8886 zclose(map->fd);
8887}
8888
8889void bpf_object__close(struct bpf_object *obj)
8890{
8891 size_t i;
8892
8893 if (IS_ERR_OR_NULL(obj))
8894 return;
8895
8896 usdt_manager_free(obj->usdt_man);
8897 obj->usdt_man = NULL;
8898
8899 bpf_gen__free(obj->gen_loader);
8900 bpf_object__elf_finish(obj);
8901 bpf_object_unload(obj);
8902 btf__free(obj->btf);
8903 btf__free(obj->btf_vmlinux);
8904 btf_ext__free(obj->btf_ext);
8905
8906 for (i = 0; i < obj->nr_maps; i++)
8907 bpf_map__destroy(&obj->maps[i]);
8908
8909 zfree(&obj->btf_custom_path);
8910 zfree(&obj->kconfig);
8911
8912 for (i = 0; i < obj->nr_extern; i++)
8913 zfree(&obj->externs[i].essent_name);
8914
8915 zfree(&obj->externs);
8916 obj->nr_extern = 0;
8917
8918 zfree(&obj->maps);
8919 obj->nr_maps = 0;
8920
8921 if (obj->programs && obj->nr_programs) {
8922 for (i = 0; i < obj->nr_programs; i++)
8923 bpf_program__exit(&obj->programs[i]);
8924 }
8925 zfree(&obj->programs);
8926
8927 zfree(&obj->feat_cache);
8928 zfree(&obj->token_path);
8929 if (obj->token_fd > 0)
8930 close(obj->token_fd);
8931
8932 zfree(&obj->arena_data);
8933
8934 free(obj);
8935}
8936
8937const char *bpf_object__name(const struct bpf_object *obj)
8938{
8939 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8940}
8941
8942unsigned int bpf_object__kversion(const struct bpf_object *obj)
8943{
8944 return obj ? obj->kern_version : 0;
8945}
8946
8947struct btf *bpf_object__btf(const struct bpf_object *obj)
8948{
8949 return obj ? obj->btf : NULL;
8950}
8951
8952int bpf_object__btf_fd(const struct bpf_object *obj)
8953{
8954 return obj->btf ? btf__fd(obj->btf) : -1;
8955}
8956
8957int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8958{
8959 if (obj->loaded)
8960 return libbpf_err(-EINVAL);
8961
8962 obj->kern_version = kern_version;
8963
8964 return 0;
8965}
8966
8967int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8968{
8969 struct bpf_gen *gen;
8970
8971 if (!opts)
8972 return -EFAULT;
8973 if (!OPTS_VALID(opts, gen_loader_opts))
8974 return -EINVAL;
8975	gen = calloc(1, sizeof(*gen));
8976 if (!gen)
8977 return -ENOMEM;
8978 gen->opts = opts;
8979 obj->gen_loader = gen;
8980 return 0;
8981}
8982
8983static struct bpf_program *
8984__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8985 bool forward)
8986{
8987 size_t nr_programs = obj->nr_programs;
8988 ssize_t idx;
8989
8990 if (!nr_programs)
8991 return NULL;
8992
8993 if (!p)
8994 /* Iter from the beginning */
8995 return forward ? &obj->programs[0] :
8996 &obj->programs[nr_programs - 1];
8997
8998 if (p->obj != obj) {
8999 pr_warn("error: program handler doesn't match object\n");
9000 return errno = EINVAL, NULL;
9001 }
9002
9003 idx = (p - obj->programs) + (forward ? 1 : -1);
9004 if (idx >= obj->nr_programs || idx < 0)
9005 return NULL;
9006 return &obj->programs[idx];
9007}
9008
9009struct bpf_program *
9010bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
9011{
9012 struct bpf_program *prog = prev;
9013
9014 do {
9015 prog = __bpf_program__iter(prog, obj, true);
9016 } while (prog && prog_is_subprog(obj, prog));
9017
9018 return prog;
9019}
9020
9021struct bpf_program *
9022bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
9023{
9024 struct bpf_program *prog = next;
9025
9026 do {
9027 prog = __bpf_program__iter(prog, obj, false);
9028 } while (prog && prog_is_subprog(obj, prog));
9029
9030 return prog;
9031}
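
/* Sketch: the public way to walk these iterators is the
 * bpf_object__for_each_program() macro from libbpf.h, e.g.:
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		printf("prog: %s (sec %s)\n", bpf_program__name(prog),
 *		       bpf_program__section_name(prog));
 *
 * which expands to a bpf_object__next_program() loop and, like the helpers
 * above, skips subprograms.
 */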
9032
9033void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
9034{
9035 prog->prog_ifindex = ifindex;
9036}
9037
9038const char *bpf_program__name(const struct bpf_program *prog)
9039{
9040 return prog->name;
9041}
9042
9043const char *bpf_program__section_name(const struct bpf_program *prog)
9044{
9045 return prog->sec_name;
9046}
9047
9048bool bpf_program__autoload(const struct bpf_program *prog)
9049{
9050 return prog->autoload;
9051}
9052
9053int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
9054{
9055 if (prog->obj->loaded)
9056 return libbpf_err(-EINVAL);
9057
9058 prog->autoload = autoload;
9059 return 0;
9060}
9061
9062bool bpf_program__autoattach(const struct bpf_program *prog)
9063{
9064 return prog->autoattach;
9065}
9066
9067void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
9068{
9069 prog->autoattach = autoattach;
9070}
9071
9072const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
9073{
9074 return prog->insns;
9075}
9076
9077size_t bpf_program__insn_cnt(const struct bpf_program *prog)
9078{
9079 return prog->insns_cnt;
9080}
9081
9082int bpf_program__set_insns(struct bpf_program *prog,
9083 struct bpf_insn *new_insns, size_t new_insn_cnt)
9084{
9085 struct bpf_insn *insns;
9086
9087 if (prog->obj->loaded)
9088 return -EBUSY;
9089
9090 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
9091 /* NULL is a valid return from reallocarray if the new count is zero */
9092 if (!insns && new_insn_cnt) {
9093 pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
9094 return -ENOMEM;
9095 }
9096 memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
9097
9098 prog->insns = insns;
9099 prog->insns_cnt = new_insn_cnt;
9100 return 0;
9101}
9102
9103int bpf_program__fd(const struct bpf_program *prog)
9104{
9105 if (!prog)
9106 return libbpf_err(-EINVAL);
9107
9108 if (prog->fd < 0)
9109 return libbpf_err(-ENOENT);
9110
9111 return prog->fd;
9112}
9113
9114__alias(bpf_program__type)
9115enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
9116
9117enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
9118{
9119 return prog->type;
9120}
9121
9122static size_t custom_sec_def_cnt;
9123static struct bpf_sec_def *custom_sec_defs;
9124static struct bpf_sec_def custom_fallback_def;
9125static bool has_custom_fallback_def;
9126static int last_custom_sec_def_handler_id;
9127
9128int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
9129{
9130 if (prog->obj->loaded)
9131 return libbpf_err(-EBUSY);
9132
9133 /* if type is not changed, do nothing */
9134 if (prog->type == type)
9135 return 0;
9136
9137 prog->type = type;
9138
9139 /* If a program type was changed, we need to reset associated SEC()
9140 * handler, as it will be invalid now. The only exception is a generic
9141 * fallback handler, which by definition is program type-agnostic and
9142 * is a catch-all custom handler, optionally set by the application,
9143 * so should be able to handle any type of BPF program.
9144 */
9145 if (prog->sec_def != &custom_fallback_def)
9146 prog->sec_def = NULL;
9147 return 0;
9148}
9149
9150__alias(bpf_program__expected_attach_type)
9151enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
9152
9153enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
9154{
9155 return prog->expected_attach_type;
9156}
9157
9158int bpf_program__set_expected_attach_type(struct bpf_program *prog,
9159 enum bpf_attach_type type)
9160{
9161 if (prog->obj->loaded)
9162 return libbpf_err(-EBUSY);
9163
9164 prog->expected_attach_type = type;
9165 return 0;
9166}
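
/* Sketch: both setters are meant for pre-load overrides, e.g. to retarget
 * one program body at a different hook (target name is illustrative):
 *
 *	bpf_program__set_type(prog, BPF_PROG_TYPE_TRACING);
 *	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FEXIT);
 *	bpf_program__set_attach_target(prog, 0, "do_unlinkat");
 *	err = bpf_object__load(obj);
 *
 * Once the object is loaded, both setters return -EBUSY.
 */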
9167
9168__u32 bpf_program__flags(const struct bpf_program *prog)
9169{
9170 return prog->prog_flags;
9171}
9172
9173int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
9174{
9175 if (prog->obj->loaded)
9176 return libbpf_err(-EBUSY);
9177
9178 prog->prog_flags = flags;
9179 return 0;
9180}
9181
9182__u32 bpf_program__log_level(const struct bpf_program *prog)
9183{
9184 return prog->log_level;
9185}
9186
9187int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
9188{
9189 if (prog->obj->loaded)
9190 return libbpf_err(-EBUSY);
9191
9192 prog->log_level = log_level;
9193 return 0;
9194}
9195
9196const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
9197{
9198 *log_size = prog->log_size;
9199 return prog->log_buf;
9200}
9201
9202int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
9203{
9204 if (log_size && !log_buf)
9205 return -EINVAL;
9206	if (log_size > UINT_MAX) /* kernel-side log_size is a u32 */
9207 return -EINVAL;
9208 if (prog->obj->loaded)
9209 return -EBUSY;
9210
9211 prog->log_buf = log_buf;
9212 prog->log_size = log_size;
9213 return 0;
9214}
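
/* Usage sketch (illustrative): route this program's verifier log into a
 * caller-owned buffer instead of libbpf's auto-grown one:
 *
 *	static char vlog[1024 * 1024];
 *
 *	bpf_program__set_log_buf(prog, vlog, sizeof(vlog));
 *	bpf_program__set_log_level(prog, 1);
 *	if (bpf_object__load(obj))
 *		fprintf(stderr, "verifier log:\n%s\n", vlog);
 */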
9215
9216#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
9217 .sec = (char *)sec_pfx, \
9218 .prog_type = BPF_PROG_TYPE_##ptype, \
9219 .expected_attach_type = atype, \
9220 .cookie = (long)(flags), \
9221 .prog_prepare_load_fn = libbpf_prepare_prog_load, \
9222 __VA_ARGS__ \
9223}
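
/* Example expansion (for illustration): SEC_DEF("kprobe+", KPROBE, 0,
 * SEC_NONE, attach_kprobe) becomes roughly
 *
 *	{
 *		.sec = (char *)"kprobe+",
 *		.prog_type = BPF_PROG_TYPE_KPROBE,
 *		.expected_attach_type = 0,
 *		.cookie = (long)(SEC_NONE),
 *		.prog_prepare_load_fn = libbpf_prepare_prog_load,
 *		attach_kprobe,
 *	}
 *
 * The trailing positional initializer lands on the struct bpf_sec_def field
 * that follows prog_prepare_load_fn (the attach callback): in C, positional
 * initializers resume after the last designated one.
 */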
9224
9225static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9226static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9227static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9228static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9229static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9230static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9231static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9232static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9233static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9234static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9235static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9236
9237static const struct bpf_sec_def section_defs[] = {
9238 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE),
9239 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
9240 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
9241 SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
9242 SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
9243 SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9244 SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
9245 SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
9246 SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9247 SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9248 SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9249 SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9250 SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9251 SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9252 SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9253 SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9254 SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9255 SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
9256 SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
9257 SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
9258 SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */
9259 SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
9260 SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),
9261 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9262 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9263 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9264 SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE),
9265 SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE),
9266 SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9267 SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9268 SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9269 SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9270 SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9271 SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9272 SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
9273 SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
9274 SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
9275 SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
9276 SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9277 SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9278 SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9279 SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace),
9280 SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
9281 SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
9282 SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
9283 SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
9284 SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
9285 SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
9286 SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
9287 SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
9288 SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
9289 SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
9290 SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
9291 SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
9292 SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE),
9293 SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE),
9294 SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE),
9295 SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE),
9296 SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE),
9297 SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
9298 SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
9299 SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
9300 SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE),
9301 SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
9302 SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
9303 SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
9304 SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
9305 SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
9306 SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE),
9307 SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
9308 SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
9309 SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
9310 SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
9311 SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
9312 SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
9313 SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
9314 SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
9315 SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
9316 SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE),
9317 SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
9318 SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
9319 SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE),
9320 SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
9321 SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
9322 SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE),
9323 SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
9324 SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
9325 SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE),
9326 SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
9327 SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
9328 SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE),
9329 SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
9330 SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
9331 SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
9332 SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
9333 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
9334 SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
9335 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
9336 SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE),
9337};
9338
9339int libbpf_register_prog_handler(const char *sec,
9340 enum bpf_prog_type prog_type,
9341 enum bpf_attach_type exp_attach_type,
9342 const struct libbpf_prog_handler_opts *opts)
9343{
9344 struct bpf_sec_def *sec_def;
9345
9346 if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
9347 return libbpf_err(-EINVAL);
9348
9349 if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
9350 return libbpf_err(-E2BIG);
9351
9352 if (sec) {
9353 sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
9354 sizeof(*sec_def));
9355 if (!sec_def)
9356 return libbpf_err(-ENOMEM);
9357
9358 custom_sec_defs = sec_def;
9359 sec_def = &custom_sec_defs[custom_sec_def_cnt];
9360 } else {
9361 if (has_custom_fallback_def)
9362 return libbpf_err(-EBUSY);
9363
9364 sec_def = &custom_fallback_def;
9365 }
9366
9367 sec_def->sec = sec ? strdup(sec) : NULL;
9368 if (sec && !sec_def->sec)
9369 return libbpf_err(-ENOMEM);
9370
9371 sec_def->prog_type = prog_type;
9372 sec_def->expected_attach_type = exp_attach_type;
9373 sec_def->cookie = OPTS_GET(opts, cookie, 0);
9374
9375 sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9376 sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9377 sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9378
9379 sec_def->handler_id = ++last_custom_sec_def_handler_id;
9380
9381 if (sec)
9382 custom_sec_def_cnt++;
9383 else
9384 has_custom_fallback_def = true;
9385
9386 return sec_def->handler_id;
9387}
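
/* A minimal usage sketch (section name and callback are hypothetical):
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
 *		.prog_attach_fn = my_attach_fn,
 *	);
 *	int id = libbpf_register_prog_handler("myhook+", BPF_PROG_TYPE_KPROBE,
 *					      0, &opts);
 *	...
 *	libbpf_unregister_prog_handler(id);
 *
 * Passing sec == NULL registers a catch-all fallback handler; only one such
 * fallback can exist at a time (hence the -EBUSY above).
 */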
9388
9389int libbpf_unregister_prog_handler(int handler_id)
9390{
9391 struct bpf_sec_def *sec_defs;
9392 int i;
9393
9394 if (handler_id <= 0)
9395 return libbpf_err(-EINVAL);
9396
9397 if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
9398 memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
9399 has_custom_fallback_def = false;
9400 return 0;
9401 }
9402
9403 for (i = 0; i < custom_sec_def_cnt; i++) {
9404 if (custom_sec_defs[i].handler_id == handler_id)
9405 break;
9406 }
9407
9408 if (i == custom_sec_def_cnt)
9409 return libbpf_err(-ENOENT);
9410
9411 free(custom_sec_defs[i].sec);
9412 for (i = i + 1; i < custom_sec_def_cnt; i++)
9413 custom_sec_defs[i - 1] = custom_sec_defs[i];
9414 custom_sec_def_cnt--;
9415
9416 /* try to shrink the array, but it's ok if we couldn't */
9417 sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
9418 /* if new count is zero, reallocarray can return a valid NULL result;
9419 * in this case the previous pointer will be freed, so we *have to*
9420 * reassign old pointer to the new value (even if it's NULL)
9421 */
9422 if (sec_defs || custom_sec_def_cnt == 0)
9423 custom_sec_defs = sec_defs;
9424
9425 return 0;
9426}
9427
9428static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
9429{
9430 size_t len = strlen(sec_def->sec);
9431
9432 /* "type/" always has to have proper SEC("type/extras") form */
9433 if (sec_def->sec[len - 1] == '/') {
9434 if (str_has_pfx(sec_name, sec_def->sec))
9435 return true;
9436 return false;
9437 }
9438
9439 /* "type+" means it can be either exact SEC("type") or
9440 * well-formed SEC("type/extras") with proper '/' separator
9441 */
9442 if (sec_def->sec[len - 1] == '+') {
9443 len--;
9444 /* not even a prefix */
9445 if (strncmp(sec_name, sec_def->sec, len) != 0)
9446 return false;
9447 /* exact match or has '/' separator */
9448 if (sec_name[len] == '\0' || sec_name[len] == '/')
9449 return true;
9450 return false;
9451 }
9452
9453 return strcmp(sec_name, sec_def->sec) == 0;
9454}
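
/* For illustration (section names here are hypothetical), the rules above
 * mean that:
 *   - a def like "kprobe+" matches SEC("kprobe") and SEC("kprobe/sys_fork"),
 *     but not SEC("kprobes");
 *   - a def ending in '/' (say, "mytype/") matches only names with extras,
 *     e.g. SEC("mytype/something");
 *   - any other def (e.g. "xdp") requires an exact match.
 */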
9455
9456static const struct bpf_sec_def *find_sec_def(const char *sec_name)
9457{
9458 const struct bpf_sec_def *sec_def;
9459 int i, n;
9460
9461 n = custom_sec_def_cnt;
9462 for (i = 0; i < n; i++) {
9463 sec_def = &custom_sec_defs[i];
9464 if (sec_def_matches(sec_def, sec_name))
9465 return sec_def;
9466 }
9467
9468 n = ARRAY_SIZE(section_defs);
9469 for (i = 0; i < n; i++) {
9470		sec_def = &section_defs[i];
9471 if (sec_def_matches(sec_def, sec_name))
9472 return sec_def;
9473 }
9474
9475 if (has_custom_fallback_def)
9476 return &custom_fallback_def;
9477
9478 return NULL;
9479}
9480
9481#define MAX_TYPE_NAME_SIZE 32
9482
9483static char *libbpf_get_type_names(bool attach_type)
9484{
9485 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
9486 char *buf;
9487
9488 buf = malloc(len);
9489 if (!buf)
9490 return NULL;
9491
9492 buf[0] = '\0';
9493	/* Build a string buf listing all available names */
9494 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9495		const struct bpf_sec_def *sec_def = &section_defs[i];
9496
9497 if (attach_type) {
9498 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9499 continue;
9500
9501 if (!(sec_def->cookie & SEC_ATTACHABLE))
9502 continue;
9503 }
9504
9505 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
9506 free(buf);
9507 return NULL;
9508 }
9509 strcat(buf, " ");
9510 strcat(buf, section_defs[i].sec);
9511 }
9512
9513 return buf;
9514}
9515
9516int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9517 enum bpf_attach_type *expected_attach_type)
9518{
9519 const struct bpf_sec_def *sec_def;
9520 char *type_names;
9521
9522 if (!name)
9523 return libbpf_err(-EINVAL);
9524
9525 sec_def = find_sec_def(name);
9526 if (sec_def) {
9527 *prog_type = sec_def->prog_type;
9528 *expected_attach_type = sec_def->expected_attach_type;
9529 return 0;
9530 }
9531
9532 pr_debug("failed to guess program type from ELF section '%s'\n", name);
9533 type_names = libbpf_get_type_names(false);
9534 if (type_names != NULL) {
9535 pr_debug("supported section(type) names are:%s\n", type_names);
9536 free(type_names);
9537 }
9538
9539 return libbpf_err(-ESRCH);
9540}
9541
9542const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
9543{
9544 if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
9545 return NULL;
9546
9547 return attach_type_name[t];
9548}
9549
9550const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
9551{
9552 if (t < 0 || t >= ARRAY_SIZE(link_type_name))
9553 return NULL;
9554
9555 return link_type_name[t];
9556}
9557
9558const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
9559{
9560 if (t < 0 || t >= ARRAY_SIZE(map_type_name))
9561 return NULL;
9562
9563 return map_type_name[t];
9564}
9565
9566const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
9567{
9568 if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
9569 return NULL;
9570
9571 return prog_type_name[t];
9572}
9573
9574static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9575 int sec_idx,
9576 size_t offset)
9577{
9578 struct bpf_map *map;
9579 size_t i;
9580
9581 for (i = 0; i < obj->nr_maps; i++) {
9582 map = &obj->maps[i];
9583 if (!bpf_map__is_struct_ops(map))
9584 continue;
9585 if (map->sec_idx == sec_idx &&
9586 map->sec_offset <= offset &&
9587 offset - map->sec_offset < map->def.value_size)
9588 return map;
9589 }
9590
9591 return NULL;
9592}
9593
9594/* Collect the reloc from ELF, populate the st_ops->progs[], and update
9595 * st_ops->data for shadow type.
9596 */
9597static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9598 Elf64_Shdr *shdr, Elf_Data *data)
9599{
9600 const struct btf_member *member;
9601 struct bpf_struct_ops *st_ops;
9602 struct bpf_program *prog;
9603 unsigned int shdr_idx;
9604 const struct btf *btf;
9605 struct bpf_map *map;
9606 unsigned int moff, insn_idx;
9607 const char *name;
9608 __u32 member_idx;
9609 Elf64_Sym *sym;
9610 Elf64_Rel *rel;
9611 int i, nrels;
9612
9613 btf = obj->btf;
9614 nrels = shdr->sh_size / shdr->sh_entsize;
9615 for (i = 0; i < nrels; i++) {
9616 rel = elf_rel_by_idx(data, i);
9617 if (!rel) {
9618 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9619 return -LIBBPF_ERRNO__FORMAT;
9620 }
9621
9622 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9623 if (!sym) {
9624 pr_warn("struct_ops reloc: symbol %zx not found\n",
9625 (size_t)ELF64_R_SYM(rel->r_info));
9626 return -LIBBPF_ERRNO__FORMAT;
9627 }
9628
9629 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
9630 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
9631 if (!map) {
9632 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
9633 (size_t)rel->r_offset);
9634 return -EINVAL;
9635 }
9636
9637 moff = rel->r_offset - map->sec_offset;
9638 shdr_idx = sym->st_shndx;
9639 st_ops = map->st_ops;
9640 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
9641 map->name,
9642 (long long)(rel->r_info >> 32),
9643 (long long)sym->st_value,
9644 shdr_idx, (size_t)rel->r_offset,
9645 map->sec_offset, sym->st_name, name);
9646
9647 if (shdr_idx >= SHN_LORESERVE) {
9648 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
9649 map->name, (size_t)rel->r_offset, shdr_idx);
9650 return -LIBBPF_ERRNO__RELOC;
9651 }
9652 if (sym->st_value % BPF_INSN_SZ) {
9653 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
9654 map->name, (unsigned long long)sym->st_value);
9655 return -LIBBPF_ERRNO__FORMAT;
9656 }
9657 insn_idx = sym->st_value / BPF_INSN_SZ;
9658
9659 member = find_member_by_offset(st_ops->type, moff * 8);
9660 if (!member) {
9661 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9662 map->name, moff);
9663 return -EINVAL;
9664 }
9665 member_idx = member - btf_members(st_ops->type);
9666 name = btf__name_by_offset(btf, member->name_off);
9667
9668 if (!resolve_func_ptr(btf, member->type, NULL)) {
9669 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9670 map->name, name);
9671 return -EINVAL;
9672 }
9673
9674 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9675 if (!prog) {
9676 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9677 map->name, shdr_idx, name);
9678 return -EINVAL;
9679 }
9680
9681 /* prevent the use of BPF prog with invalid type */
9682 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
9683 pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
9684 map->name, prog->name);
9685 return -EINVAL;
9686 }
9687
9688 st_ops->progs[member_idx] = prog;
9689
9690 /* st_ops->data will be exposed to users, being returned by
9691 * bpf_map__initial_value() as a pointer to the shadow
9692 * type. All function pointers in the original struct type
9693 * should be converted to a pointer to struct bpf_program
9694 * in the shadow type.
9695 */
9696 *((struct bpf_program **)(st_ops->data + moff)) = prog;
9697 }
9698
9699 return 0;
9700}
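
/* For context, a sketch of the BPF-side code that produces such relocations
 * (struct my_ops and the names are hypothetical):
 *
 *	SEC("struct_ops/my_op")
 *	int BPF_PROG(my_op_impl) { return 0; }
 *
 *	SEC(".struct_ops")
 *	struct my_ops ops = {
 *		.my_op = (void *)my_op_impl,
 *	};
 *
 * The function pointer initializer emits an ELF relocation against
 * my_op_impl, which the loop above resolves into st_ops->progs[member_idx]
 * and mirrors into the shadow type at st_ops->data + moff.
 */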
9701
9702#define BTF_TRACE_PREFIX "btf_trace_"
9703#define BTF_LSM_PREFIX "bpf_lsm_"
9704#define BTF_ITER_PREFIX "bpf_iter_"
9705#define BTF_MAX_NAME_SIZE 128
9706
9707void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
9708 const char **prefix, int *kind)
9709{
9710 switch (attach_type) {
9711 case BPF_TRACE_RAW_TP:
9712 *prefix = BTF_TRACE_PREFIX;
9713 *kind = BTF_KIND_TYPEDEF;
9714 break;
9715 case BPF_LSM_MAC:
9716 case BPF_LSM_CGROUP:
9717 *prefix = BTF_LSM_PREFIX;
9718 *kind = BTF_KIND_FUNC;
9719 break;
9720 case BPF_TRACE_ITER:
9721 *prefix = BTF_ITER_PREFIX;
9722 *kind = BTF_KIND_FUNC;
9723 break;
9724 default:
9725 *prefix = "";
9726 *kind = BTF_KIND_FUNC;
9727 }
9728}
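
/* Example of the mapping above: a BPF_TRACE_RAW_TP program attaching to
 * "sched_switch" is resolved against the BTF typedef
 * "btf_trace_sched_switch", while a BPF_LSM_MAC program attaching to
 * "file_open" is resolved against the BTF func "bpf_lsm_file_open".
 */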
9729
9730static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9731 const char *name, __u32 kind)
9732{
9733 char btf_type_name[BTF_MAX_NAME_SIZE];
9734 int ret;
9735
9736 ret = snprintf(btf_type_name, sizeof(btf_type_name),
9737 "%s%s", prefix, name);
9738	/* snprintf returns the number of characters that would have been
9739	 * written, excluding the terminating null. So a return value
9740	 * >= BTF_MAX_NAME_SIZE indicates truncation.
9741	 */
9742 if (ret < 0 || ret >= sizeof(btf_type_name))
9743 return -ENAMETOOLONG;
9744 return btf__find_by_name_kind(btf, btf_type_name, kind);
9745}
9746
9747static inline int find_attach_btf_id(struct btf *btf, const char *name,
9748 enum bpf_attach_type attach_type)
9749{
9750 const char *prefix;
9751 int kind;
9752
9753 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9754 return find_btf_by_prefix_kind(btf, prefix, name, kind);
9755}
9756
9757int libbpf_find_vmlinux_btf_id(const char *name,
9758 enum bpf_attach_type attach_type)
9759{
9760 struct btf *btf;
9761 int err;
9762
9763 btf = btf__load_vmlinux_btf();
9764 err = libbpf_get_error(btf);
9765 if (err) {
9766 pr_warn("vmlinux BTF is not found\n");
9767 return libbpf_err(err);
9768 }
9769
9770 err = find_attach_btf_id(btf, name, attach_type);
9771 if (err <= 0)
9772 pr_warn("%s is not found in vmlinux BTF\n", name);
9773
9774 btf__free(btf);
9775 return libbpf_err(err);
9776}
9777
9778static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9779{
9780 struct bpf_prog_info info;
9781 __u32 info_len = sizeof(info);
9782 struct btf *btf;
9783 int err;
9784
9785 memset(&info, 0, info_len);
9786 err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
9787 if (err) {
9788 pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
9789 attach_prog_fd, err);
9790 return err;
9791 }
9792
9793 err = -EINVAL;
9794 if (!info.btf_id) {
9795 pr_warn("The target program doesn't have BTF\n");
9796 goto out;
9797 }
9798 btf = btf__load_from_kernel_by_id(info.btf_id);
9799 err = libbpf_get_error(btf);
9800 if (err) {
9801 pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
9802 goto out;
9803 }
9804 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9805 btf__free(btf);
9806 if (err <= 0) {
9807 pr_warn("%s is not found in prog's BTF\n", name);
9808 goto out;
9809 }
9810out:
9811 return err;
9812}
9813
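/* Look up the attach target's BTF ID first in vmlinux BTF and then, on
 * -ENOENT, in each loaded kernel module's BTF. A btf_obj_fd of 0 denotes
 * vmlinux BTF; for a module match, that module's BTF FD is returned along
 * with the type ID.
 */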
9814static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9815 enum bpf_attach_type attach_type,
9816 int *btf_obj_fd, int *btf_type_id)
9817{
9818 int ret, i;
9819
9820 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9821 if (ret > 0) {
9822 *btf_obj_fd = 0; /* vmlinux BTF */
9823 *btf_type_id = ret;
9824 return 0;
9825 }
9826 if (ret != -ENOENT)
9827 return ret;
9828
9829 ret = load_module_btfs(obj);
9830 if (ret)
9831 return ret;
9832
9833 for (i = 0; i < obj->btf_module_cnt; i++) {
9834 const struct module_btf *mod = &obj->btf_modules[i];
9835
9836 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9837 if (ret > 0) {
9838 *btf_obj_fd = mod->fd;
9839 *btf_type_id = ret;
9840 return 0;
9841 }
9842 if (ret == -ENOENT)
9843 continue;
9844
9845 return ret;
9846 }
9847
9848 return -ESRCH;
9849}
9850
9851static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9852 int *btf_obj_fd, int *btf_type_id)
9853{
9854 enum bpf_attach_type attach_type = prog->expected_attach_type;
9855 __u32 attach_prog_fd = prog->attach_prog_fd;
9856 int err = 0;
9857
9858 /* BPF program's BTF ID */
9859 if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
9860 if (!attach_prog_fd) {
9861 pr_warn("prog '%s': attach program FD is not set\n", prog->name);
9862 return -EINVAL;
9863 }
9864 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9865 if (err < 0) {
9866 pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9867 prog->name, attach_prog_fd, attach_name, err);
9868 return err;
9869 }
9870 *btf_obj_fd = 0;
9871 *btf_type_id = err;
9872 return 0;
9873 }
9874
9875 /* kernel/module BTF ID */
9876 if (prog->obj->gen_loader) {
9877 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9878 *btf_obj_fd = 0;
9879 *btf_type_id = 1;
9880 } else {
9881 err = find_kernel_btf_id(prog->obj, attach_name,
9882 attach_type, btf_obj_fd,
9883 btf_type_id);
9884 }
9885 if (err) {
9886 pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
9887 prog->name, attach_name, err);
9888 return err;
9889 }
9890 return 0;
9891}
9892
9893int libbpf_attach_type_by_name(const char *name,
9894 enum bpf_attach_type *attach_type)
9895{
9896 char *type_names;
9897 const struct bpf_sec_def *sec_def;
9898
9899 if (!name)
9900 return libbpf_err(-EINVAL);
9901
9902 sec_def = find_sec_def(name);
9903 if (!sec_def) {
9904 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9905 type_names = libbpf_get_type_names(true);
9906 if (type_names != NULL) {
9907 pr_debug("attachable section(type) names are:%s\n", type_names);
9908 free(type_names);
9909 }
9910
9911 return libbpf_err(-EINVAL);
9912 }
9913
9914 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9915 return libbpf_err(-EINVAL);
9916 if (!(sec_def->cookie & SEC_ATTACHABLE))
9917 return libbpf_err(-EINVAL);
9918
9919 *attach_type = sec_def->expected_attach_type;
9920 return 0;
9921}
9922
9923int bpf_map__fd(const struct bpf_map *map)
9924{
9925 if (!map)
9926 return libbpf_err(-EINVAL);
9927 if (!map_is_created(map))
9928 return -1;
9929 return map->fd;
9930}
9931
9932static bool map_uses_real_name(const struct bpf_map *map)
9933{
9934 /* Since libbpf started to support custom .data.* and .rodata.* maps,
9935 * their user-visible name differs from kernel-visible name. Users see
9936 * such map's corresponding ELF section name as a map name.
9937 * This check distinguishes .data/.rodata from .data.* and .rodata.*
9938 * maps to know which name has to be returned to the user.
9939 */
9940 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
9941 return true;
9942 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
9943 return true;
9944 return false;
9945}
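
/* Example (variable name is hypothetical): a global placed via
 * SEC(".data.my_cfg") ends up in a map whose user-visible name, as returned
 * by bpf_map__name() below, is ".data.my_cfg", whereas plain .data globals
 * keep the kernel-visible name derived from the object name (something like
 * "myobj.data").
 */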
9946
9947const char *bpf_map__name(const struct bpf_map *map)
9948{
9949 if (!map)
9950 return NULL;
9951
9952 if (map_uses_real_name(map))
9953 return map->real_name;
9954
9955 return map->name;
9956}
9957
9958enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9959{
9960 return map->def.type;
9961}
9962
9963int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9964{
9965 if (map_is_created(map))
9966 return libbpf_err(-EBUSY);
9967 map->def.type = type;
9968 return 0;
9969}
9970
9971__u32 bpf_map__map_flags(const struct bpf_map *map)
9972{
9973 return map->def.map_flags;
9974}
9975
9976int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9977{
9978 if (map_is_created(map))
9979 return libbpf_err(-EBUSY);
9980 map->def.map_flags = flags;
9981 return 0;
9982}
9983
9984__u64 bpf_map__map_extra(const struct bpf_map *map)
9985{
9986 return map->map_extra;
9987}
9988
9989int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
9990{
9991 if (map_is_created(map))
9992 return libbpf_err(-EBUSY);
9993 map->map_extra = map_extra;
9994 return 0;
9995}
9996
9997__u32 bpf_map__numa_node(const struct bpf_map *map)
9998{
9999 return map->numa_node;
10000}
10001
10002int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
10003{
10004 if (map_is_created(map))
10005 return libbpf_err(-EBUSY);
10006 map->numa_node = numa_node;
10007 return 0;
10008}
10009
10010__u32 bpf_map__key_size(const struct bpf_map *map)
10011{
10012 return map->def.key_size;
10013}
10014
10015int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
10016{
10017 if (map_is_created(map))
10018 return libbpf_err(-EBUSY);
10019 map->def.key_size = size;
10020 return 0;
10021}
10022
10023__u32 bpf_map__value_size(const struct bpf_map *map)
10024{
10025 return map->def.value_size;
10026}
10027
10028static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
10029{
10030 struct btf *btf;
10031 struct btf_type *datasec_type, *var_type;
10032 struct btf_var_secinfo *var;
10033 const struct btf_type *array_type;
10034 const struct btf_array *array;
10035 int vlen, element_sz, new_array_id;
10036 __u32 nr_elements;
10037
10038 /* check btf existence */
10039 btf = bpf_object__btf(map->obj);
10040 if (!btf)
10041 return -ENOENT;
10042
10043 /* verify map is datasec */
10044 datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
10045 if (!btf_is_datasec(datasec_type)) {
10046 pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
10047 bpf_map__name(map));
10048 return -EINVAL;
10049 }
10050
10051 /* verify datasec has at least one var */
10052 vlen = btf_vlen(datasec_type);
10053 if (vlen == 0) {
10054 pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
10055 bpf_map__name(map));
10056 return -EINVAL;
10057 }
10058
10059 /* verify last var in the datasec is an array */
10060 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10061 var_type = btf_type_by_id(btf, var->type);
10062 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
10063 if (!btf_is_array(array_type)) {
10064 pr_warn("map '%s': cannot be resized, last var must be an array\n",
10065 bpf_map__name(map));
10066 return -EINVAL;
10067 }
10068
10069 /* verify request size aligns with array */
10070 array = btf_array(array_type);
10071 element_sz = btf__resolve_size(btf, array->type);
10072 if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
10073 pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
10074 bpf_map__name(map), element_sz, size);
10075 return -EINVAL;
10076 }
10077
10078 /* create a new array based on the existing array, but with new length */
10079 nr_elements = (size - var->offset) / element_sz;
10080 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10081 if (new_array_id < 0)
10082 return new_array_id;
10083
10084 /* adding a new btf type invalidates existing pointers to btf objects,
10085 * so refresh pointers before proceeding
10086 */
10087 datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10088 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10089 var_type = btf_type_by_id(btf, var->type);
10090
10091 /* finally update btf info */
10092 datasec_type->size = size;
10093 var->size = size - var->offset;
10094 var_type->type = new_array_id;
10095
10096 return 0;
10097}
10098
10099int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
10100{
10101 if (map->obj->loaded || map->reused)
10102 return libbpf_err(-EBUSY);
10103
10104 if (map->mmaped) {
10105 size_t mmap_old_sz, mmap_new_sz;
10106 int err;
10107
10108 if (map->def.type != BPF_MAP_TYPE_ARRAY)
10109 return -EOPNOTSUPP;
10110
10111 mmap_old_sz = bpf_map_mmap_sz(map);
10112 mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
10113 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
10114 if (err) {
10115 pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
10116 bpf_map__name(map), err);
10117 return err;
10118 }
10119 err = map_btf_datasec_resize(map, size);
10120 if (err && err != -ENOENT) {
10121 pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
10122 bpf_map__name(map), err);
10123 map->btf_value_type_id = 0;
10124 map->btf_key_type_id = 0;
10125 }
10126 }
10127
10128 map->def.value_size = size;
10129 return 0;
10130}
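
/* A minimal resize sketch (assuming a global array my_buf[] placed in its
 * own .data.my_buf section, so it is the last variable of its datasec):
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, ".data.my_buf");
 *	err = bpf_map__set_value_size(m, 2 * bpf_map__value_size(m));
 *
 * This has to happen after bpf_object__open() but before bpf_object__load();
 * the mmap'ed region and the BTF datasec are then adjusted together above.
 */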
10131
10132__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
10133{
10134 return map ? map->btf_key_type_id : 0;
10135}
10136
10137__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
10138{
10139 return map ? map->btf_value_type_id : 0;
10140}
10141
10142int bpf_map__set_initial_value(struct bpf_map *map,
10143 const void *data, size_t size)
10144{
10145 size_t actual_sz;
10146
10147 if (map->obj->loaded || map->reused)
10148 return libbpf_err(-EBUSY);
10149
10150 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
10151 return libbpf_err(-EINVAL);
10152
10153 if (map->def.type == BPF_MAP_TYPE_ARENA)
10154 actual_sz = map->obj->arena_data_sz;
10155 else
10156 actual_sz = map->def.value_size;
10157 if (size != actual_sz)
10158 return libbpf_err(-EINVAL);
10159
10160 memcpy(map->mmaped, data, size);
10161 return 0;
10162}
10163
10164void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
10165{
10166 if (bpf_map__is_struct_ops(map)) {
10167 if (psize)
10168 *psize = map->def.value_size;
10169 return map->st_ops->data;
10170 }
10171
10172 if (!map->mmaped)
10173 return NULL;
10174
10175	if (psize) /* tolerate NULL psize, like the struct_ops branch above */
10176		*psize = map->def.type == BPF_MAP_TYPE_ARENA
10177			 ? map->obj->arena_data_sz
10178			 : map->def.value_size;
10179
10180 return map->mmaped;
10181}
10182
10183bool bpf_map__is_internal(const struct bpf_map *map)
10184{
10185 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10186}
10187
10188__u32 bpf_map__ifindex(const struct bpf_map *map)
10189{
10190 return map->map_ifindex;
10191}
10192
10193int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
10194{
10195 if (map_is_created(map))
10196 return libbpf_err(-EBUSY);
10197 map->map_ifindex = ifindex;
10198 return 0;
10199}
10200
10201int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
10202{
10203 if (!bpf_map_type__is_map_in_map(map->def.type)) {
10204 pr_warn("error: unsupported map type\n");
10205 return libbpf_err(-EINVAL);
10206 }
10207 if (map->inner_map_fd != -1) {
10208 pr_warn("error: inner_map_fd already specified\n");
10209 return libbpf_err(-EINVAL);
10210 }
10211 if (map->inner_map) {
10212 bpf_map__destroy(map->inner_map);
10213 zfree(&map->inner_map);
10214 }
10215 map->inner_map_fd = fd;
10216 return 0;
10217}
10218
10219static struct bpf_map *
10220__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
10221{
10222 ssize_t idx;
10223 struct bpf_map *s, *e;
10224
10225 if (!obj || !obj->maps)
10226 return errno = EINVAL, NULL;
10227
10228 s = obj->maps;
10229 e = obj->maps + obj->nr_maps;
10230
10231 if ((m < s) || (m >= e)) {
10232 pr_warn("error in %s: map handler doesn't belong to object\n",
10233 __func__);
10234 return errno = EINVAL, NULL;
10235 }
10236
10237 idx = (m - obj->maps) + i;
10238 if (idx >= obj->nr_maps || idx < 0)
10239 return NULL;
10240 return &obj->maps[idx];
10241}
10242
10243struct bpf_map *
10244bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
10245{
10246 if (prev == NULL)
10247 return obj->maps;
10248
10249 return __bpf_map__iter(prev, obj, 1);
10250}
10251
10252struct bpf_map *
10253bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
10254{
10255 if (next == NULL) {
10256 if (!obj->nr_maps)
10257 return NULL;
10258 return obj->maps + obj->nr_maps - 1;
10259 }
10260
10261 return __bpf_map__iter(next, obj, -1);
10262}
10263
10264struct bpf_map *
10265bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
10266{
10267 struct bpf_map *pos;
10268
10269 bpf_object__for_each_map(pos, obj) {
10270 /* if it's a special internal map name (which always starts
10271 * with dot) then check if that special name matches the
10272 * real map name (ELF section name)
10273 */
10274 if (name[0] == '.') {
10275 if (pos->real_name && strcmp(pos->real_name, name) == 0)
10276 return pos;
10277 continue;
10278 }
10279 /* otherwise map name has to be an exact match */
10280 if (map_uses_real_name(pos)) {
10281 if (strcmp(pos->real_name, name) == 0)
10282 return pos;
10283 continue;
10284 }
10285 if (strcmp(pos->name, name) == 0)
10286 return pos;
10287 }
10288 return errno = ENOENT, NULL;
10289}
10290
10291int
10292bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
10293{
10294 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10295}
10296
10297static int validate_map_op(const struct bpf_map *map, size_t key_sz,
10298 size_t value_sz, bool check_value_sz)
10299{
10300 if (!map_is_created(map)) /* map is not yet created */
10301 return -ENOENT;
10302
10303 if (map->def.key_size != key_sz) {
10304 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
10305 map->name, key_sz, map->def.key_size);
10306 return -EINVAL;
10307 }
10308
10309 if (!check_value_sz)
10310 return 0;
10311
10312 switch (map->def.type) {
10313 case BPF_MAP_TYPE_PERCPU_ARRAY:
10314 case BPF_MAP_TYPE_PERCPU_HASH:
10315 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
10316 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
10317 int num_cpu = libbpf_num_possible_cpus();
10318 size_t elem_sz = roundup(map->def.value_size, 8);
10319
10320 if (value_sz != num_cpu * elem_sz) {
10321 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10322 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10323 return -EINVAL;
10324 }
10325 break;
10326 }
10327 default:
10328 if (map->def.value_size != value_sz) {
10329 pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
10330 map->name, value_sz, map->def.value_size);
10331 return -EINVAL;
10332 }
10333 break;
10334 }
10335 return 0;
10336}
10337
10338int bpf_map__lookup_elem(const struct bpf_map *map,
10339 const void *key, size_t key_sz,
10340 void *value, size_t value_sz, __u64 flags)
10341{
10342 int err;
10343
10344 err = validate_map_op(map, key_sz, value_sz, true);
10345 if (err)
10346 return libbpf_err(err);
10347
10348 return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10349}
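
/* Per-CPU maps are the main gotcha for the size validation above: value_sz
 * must cover every possible CPU, with each element padded to 8 bytes. A
 * sketch for a hypothetical per-CPU map with __u32 values:
 *
 *	int ncpu = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpu, sizeof(__u64));    // roundup(4, 8) == 8
 *	err = bpf_map__lookup_elem(map, &key, sizeof(key),
 *				   vals, ncpu * sizeof(__u64), 0);
 */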
10350
10351int bpf_map__update_elem(const struct bpf_map *map,
10352 const void *key, size_t key_sz,
10353 const void *value, size_t value_sz, __u64 flags)
10354{
10355 int err;
10356
10357 err = validate_map_op(map, key_sz, value_sz, true);
10358 if (err)
10359 return libbpf_err(err);
10360
10361 return bpf_map_update_elem(map->fd, key, value, flags);
10362}
10363
10364int bpf_map__delete_elem(const struct bpf_map *map,
10365 const void *key, size_t key_sz, __u64 flags)
10366{
10367 int err;
10368
10369 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10370 if (err)
10371 return libbpf_err(err);
10372
10373 return bpf_map_delete_elem_flags(map->fd, key, flags);
10374}
10375
10376int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
10377 const void *key, size_t key_sz,
10378 void *value, size_t value_sz, __u64 flags)
10379{
10380 int err;
10381
10382 err = validate_map_op(map, key_sz, value_sz, true);
10383 if (err)
10384 return libbpf_err(err);
10385
10386 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10387}
10388
10389int bpf_map__get_next_key(const struct bpf_map *map,
10390 const void *cur_key, void *next_key, size_t key_sz)
10391{
10392 int err;
10393
10394 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10395 if (err)
10396 return libbpf_err(err);
10397
10398 return bpf_map_get_next_key(map->fd, cur_key, next_key);
10399}
10400
10401long libbpf_get_error(const void *ptr)
10402{
10403 if (!IS_ERR_OR_NULL(ptr))
10404 return 0;
10405
10406 if (IS_ERR(ptr))
10407 errno = -PTR_ERR(ptr);
10408
10409 /* If ptr == NULL, then errno should be already set by the failing
10410 * API, because libbpf never returns NULL on success and it now always
10411 * sets errno on error. So no extra errno handling for ptr == NULL
10412 * case.
10413 */
10414 return -errno;
10415}
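
/* With libbpf 1.0 error semantics, pointer-returning APIs return NULL and
 * set errno, so new code can skip libbpf_get_error() entirely. A sketch:
 *
 *	struct bpf_link *l = bpf_program__attach(prog);
 *	if (!l)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 */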
10416
10417/* Replace link's underlying BPF program with the new one */
10418int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10419{
10420 int ret;
10421
10422 ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
10423 return libbpf_err_errno(ret);
10424}
10425
10426/* Release "ownership" of the underlying BPF resource (typically, a BPF
10427 * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
10428 * disconnected link, when destroyed through a bpf_link__destroy() call,
10429 * won't attempt to detach/unregister that BPF resource. This is useful in
10430 * situations where, say, the attached BPF program has to outlive the
10431 * userspace program that attached it. Depending on the type of BPF program,
10432 * though, additional steps (like pinning the BPF program in BPF FS) might be
10433 * necessary to ensure that exit of the userspace program doesn't trigger
10434 * automatic detachment and cleanup inside the kernel.
10435 */
10436void bpf_link__disconnect(struct bpf_link *link)
10437{
10438 link->disconnected = true;
10439}
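
/* Typical pattern for keeping a BPF program attached after the loader exits
 * (sketch; the pin path is arbitrary):
 *
 *	bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link);  // frees memory, but won't detach anymore
 */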
10440
10441int bpf_link__destroy(struct bpf_link *link)
10442{
10443 int err = 0;
10444
10445 if (IS_ERR_OR_NULL(link))
10446 return 0;
10447
10448 if (!link->disconnected && link->detach)
10449 err = link->detach(link);
10450 if (link->pin_path)
10451 free(link->pin_path);
10452 if (link->dealloc)
10453 link->dealloc(link);
10454 else
10455 free(link);
10456
10457 return libbpf_err(err);
10458}
10459
10460int bpf_link__fd(const struct bpf_link *link)
10461{
10462 return link->fd;
10463}
10464
10465const char *bpf_link__pin_path(const struct bpf_link *link)
10466{
10467 return link->pin_path;
10468}
10469
10470static int bpf_link__detach_fd(struct bpf_link *link)
10471{
10472 return libbpf_err_errno(close(link->fd));
10473}
10474
10475struct bpf_link *bpf_link__open(const char *path)
10476{
10477 struct bpf_link *link;
10478 int fd;
10479
10480 fd = bpf_obj_get(path);
10481 if (fd < 0) {
10482 fd = -errno;
10483 pr_warn("failed to open link at %s: %d\n", path, fd);
10484 return libbpf_err_ptr(fd);
10485 }
10486
10487 link = calloc(1, sizeof(*link));
10488 if (!link) {
10489 close(fd);
10490 return libbpf_err_ptr(-ENOMEM);
10491 }
10492 link->detach = &bpf_link__detach_fd;
10493 link->fd = fd;
10494
10495 link->pin_path = strdup(path);
10496 if (!link->pin_path) {
10497 bpf_link__destroy(link);
10498 return libbpf_err_ptr(-ENOMEM);
10499 }
10500
10501 return link;
10502}
10503
10504int bpf_link__detach(struct bpf_link *link)
10505{
10506 return bpf_link_detach(link->fd) ? -errno : 0;
10507}
10508
10509int bpf_link__pin(struct bpf_link *link, const char *path)
10510{
10511 int err;
10512
10513 if (link->pin_path)
10514 return libbpf_err(-EBUSY);
10515 err = make_parent_dir(path);
10516 if (err)
10517 return libbpf_err(err);
10518 err = check_path(path);
10519 if (err)
10520 return libbpf_err(err);
10521
10522 link->pin_path = strdup(path);
10523 if (!link->pin_path)
10524 return libbpf_err(-ENOMEM);
10525
10526 if (bpf_obj_pin(link->fd, link->pin_path)) {
10527 err = -errno;
10528 zfree(&link->pin_path);
10529 return libbpf_err(err);
10530 }
10531
10532 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10533 return 0;
10534}
10535
10536int bpf_link__unpin(struct bpf_link *link)
10537{
10538 int err;
10539
10540 if (!link->pin_path)
10541 return libbpf_err(-EINVAL);
10542
10543 err = unlink(link->pin_path);
10544 if (err != 0)
10545 return -errno;
10546
10547 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10548 zfree(&link->pin_path);
10549 return 0;
10550}
10551
10552struct bpf_link_perf {
10553 struct bpf_link link;
10554 int perf_event_fd;
10555 /* legacy kprobe support: keep track of probe identifier and type */
10556 char *legacy_probe_name;
10557 bool legacy_is_kprobe;
10558 bool legacy_is_retprobe;
10559};
10560
10561static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
10562static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
10563
10564static int bpf_link_perf_detach(struct bpf_link *link)
10565{
10566 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10567 int err = 0;
10568
10569 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
10570 err = -errno;
10571
10572 if (perf_link->perf_event_fd != link->fd)
10573 close(perf_link->perf_event_fd);
10574 close(link->fd);
10575
10576 /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
10577 if (perf_link->legacy_probe_name) {
10578 if (perf_link->legacy_is_kprobe) {
10579 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10580 perf_link->legacy_is_retprobe);
10581 } else {
10582 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10583 perf_link->legacy_is_retprobe);
10584 }
10585 }
10586
10587 return err;
10588}
10589
10590static void bpf_link_perf_dealloc(struct bpf_link *link)
10591{
10592 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10593
10594 free(perf_link->legacy_probe_name);
10595 free(perf_link);
10596}
10597
10598struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
10599 const struct bpf_perf_event_opts *opts)
10600{
10601 char errmsg[STRERR_BUFSIZE];
10602 struct bpf_link_perf *link;
10603 int prog_fd, link_fd = -1, err;
10604 bool force_ioctl_attach;
10605
10606 if (!OPTS_VALID(opts, bpf_perf_event_opts))
10607 return libbpf_err_ptr(-EINVAL);
10608
10609 if (pfd < 0) {
10610 pr_warn("prog '%s': invalid perf event FD %d\n",
10611 prog->name, pfd);
10612 return libbpf_err_ptr(-EINVAL);
10613 }
10614 prog_fd = bpf_program__fd(prog);
10615 if (prog_fd < 0) {
10616 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
10617 prog->name);
10618 return libbpf_err_ptr(-EINVAL);
10619 }
10620
10621 link = calloc(1, sizeof(*link));
10622 if (!link)
10623 return libbpf_err_ptr(-ENOMEM);
10624 link->link.detach = &bpf_link_perf_detach;
10625 link->link.dealloc = &bpf_link_perf_dealloc;
10626 link->perf_event_fd = pfd;
10627
10628 force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
10629 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
10630 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
10631 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
10632
10633 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
10634 if (link_fd < 0) {
10635 err = -errno;
10636 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
10637 prog->name, pfd,
10638 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10639 goto err_out;
10640 }
10641 link->link.fd = link_fd;
10642 } else {
10643 if (OPTS_GET(opts, bpf_cookie, 0)) {
10644 pr_warn("prog '%s': user context value is not supported\n", prog->name);
10645 err = -EOPNOTSUPP;
10646 goto err_out;
10647 }
10648
10649 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10650 err = -errno;
10651 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
10652 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10653 if (err == -EPROTO)
10654 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10655 prog->name, pfd);
10656 goto err_out;
10657 }
10658 link->link.fd = pfd;
10659 }
10660 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10661 err = -errno;
10662 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
10663 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10664 goto err_out;
10665 }
10666
10667 return &link->link;
10668err_out:
10669 if (link_fd >= 0)
10670 close(link_fd);
10671 free(link);
10672 return libbpf_err_ptr(err);
10673}
10674
10675struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
10676{
10677 return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
10678}
10679
10680/*
10681 * This function is expected to parse an integer in the range of [0, 2^31-1]
10682 * from the given file using scanf format string fmt. If the actual parsed
10683 * value is negative, the result might be indistinguishable from an error.
10684 */
10685static int parse_uint_from_file(const char *file, const char *fmt)
10686{
10687 char buf[STRERR_BUFSIZE];
10688 int err, ret;
10689 FILE *f;
10690
10691 f = fopen(file, "re");
10692 if (!f) {
10693 err = -errno;
10694 pr_debug("failed to open '%s': %s\n", file,
10695 libbpf_strerror_r(err, buf, sizeof(buf)));
10696 return err;
10697 }
10698 err = fscanf(f, fmt, &ret);
10699 if (err != 1) {
10700 err = err == EOF ? -EIO : -errno;
10701 pr_debug("failed to parse '%s': %s\n", file,
10702 libbpf_strerror_r(err, buf, sizeof(buf)));
10703 fclose(f);
10704 return err;
10705 }
10706 fclose(f);
10707 return ret;
10708}
10709
10710static int determine_kprobe_perf_type(void)
10711{
10712 const char *file = "/sys/bus/event_source/devices/kprobe/type";
10713
10714 return parse_uint_from_file(file, "%d\n");
10715}
10716
10717static int determine_uprobe_perf_type(void)
10718{
10719 const char *file = "/sys/bus/event_source/devices/uprobe/type";
10720
10721 return parse_uint_from_file(file, "%d\n");
10722}
10723
10724static int determine_kprobe_retprobe_bit(void)
10725{
10726 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10727
10728 return parse_uint_from_file(file, "config:%d\n");
10729}
10730
10731static int determine_uprobe_retprobe_bit(void)
10732{
10733 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10734
10735 return parse_uint_from_file(file, "config:%d\n");
10736}
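
/* For reference (exact values vary by kernel), the sysfs files read above
 * typically contain a small dynamic PMU type number (e.g. "6") and a
 * retprobe bit spec such as "config:0", i.e. bit 0 of perf_event_attr.config
 * selects kretprobe/uretprobe mode.
 */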
10737
10738#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
10739#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
10740
10741static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
10742 uint64_t offset, int pid, size_t ref_ctr_off)
10743{
10744 const size_t attr_sz = sizeof(struct perf_event_attr);
10745 struct perf_event_attr attr;
10746 char errmsg[STRERR_BUFSIZE];
10747 int type, pfd;
10748
10749 if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
10750 return -EINVAL;
10751
10752 memset(&attr, 0, attr_sz);
10753
10754 type = uprobe ? determine_uprobe_perf_type()
10755 : determine_kprobe_perf_type();
10756 if (type < 0) {
10757 pr_warn("failed to determine %s perf type: %s\n",
10758 uprobe ? "uprobe" : "kprobe",
10759 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10760 return type;
10761 }
10762 if (retprobe) {
10763 int bit = uprobe ? determine_uprobe_retprobe_bit()
10764 : determine_kprobe_retprobe_bit();
10765
10766 if (bit < 0) {
10767 pr_warn("failed to determine %s retprobe bit: %s\n",
10768 uprobe ? "uprobe" : "kprobe",
10769 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
10770 return bit;
10771 }
10772 attr.config |= 1 << bit;
10773 }
10774 attr.size = attr_sz;
10775 attr.type = type;
10776 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10777 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10778 attr.config2 = offset; /* kprobe_addr or probe_offset */
10779
10780 /* pid filter is meaningful only for uprobes */
10781 pfd = syscall(__NR_perf_event_open, &attr,
10782 pid < 0 ? -1 : pid /* pid */,
10783 pid == -1 ? 0 : -1 /* cpu */,
10784 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10785 return pfd >= 0 ? pfd : -errno;
10786}
10787
10788static int append_to_file(const char *file, const char *fmt, ...)
10789{
10790 int fd, n, err = 0;
10791 va_list ap;
10792 char buf[1024];
10793
10794 va_start(ap, fmt);
10795 n = vsnprintf(buf, sizeof(buf), fmt, ap);
10796 va_end(ap);
10797
10798 if (n < 0 || n >= sizeof(buf))
10799 return -EINVAL;
10800
10801 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
10802 if (fd < 0)
10803 return -errno;
10804
10805 if (write(fd, buf, n) < 0)
10806 err = -errno;
10807
10808 close(fd);
10809 return err;
10810}
10811
10812#define DEBUGFS "/sys/kernel/debug/tracing"
10813#define TRACEFS "/sys/kernel/tracing"
10814
10815static bool use_debugfs(void)
10816{
10817 static int has_debugfs = -1;
10818
10819 if (has_debugfs < 0)
10820 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
10821
10822 return has_debugfs == 1;
10823}
10824
10825static const char *tracefs_path(void)
10826{
10827 return use_debugfs() ? DEBUGFS : TRACEFS;
10828}
10829
10830static const char *tracefs_kprobe_events(void)
10831{
10832 return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
10833}
10834
10835static const char *tracefs_uprobe_events(void)
10836{
10837 return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
10838}
10839
10840static const char *tracefs_available_filter_functions(void)
10841{
10842 return use_debugfs() ? DEBUGFS"/available_filter_functions"
10843 : TRACEFS"/available_filter_functions";
10844}
10845
10846static const char *tracefs_available_filter_functions_addrs(void)
10847{
10848 return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
10849 : TRACEFS"/available_filter_functions_addrs";
10850}
10851
10852static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
10853 const char *kfunc_name, size_t offset)
10854{
10855 static int index = 0;
10856 int i;
10857
10858 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
10859 __sync_fetch_and_add(&index, 1));
10860
10861	/* sanitize kfunc_name in the probe name: only alphanumerics and '_' are allowed */
10862 for (i = 0; buf[i]; i++) {
10863 if (!isalnum(buf[i]))
10864 buf[i] = '_';
10865 }
10866}
10867
10868static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
10869 const char *kfunc_name, size_t offset)
10870{
10871 return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
10872 retprobe ? 'r' : 'p',
10873 retprobe ? "kretprobes" : "kprobes",
10874 probe_name, kfunc_name, offset);
10875}
10876
10877static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
10878{
10879 return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
10880 retprobe ? "kretprobes" : "kprobes", probe_name);
10881}
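
/* For reference, the strings appended to kprobe_events above look like
 * (names and offsets illustrative):
 *
 *	p:kprobes/libbpf_1234_do_sys_open_0x0_0 do_sys_open+0x0
 *	r:kretprobes/libbpf_1234_do_sys_open_0x0_1 do_sys_open+0x0
 *	-:kprobes/libbpf_1234_do_sys_open_0x0_0
 */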
10882
10883static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10884{
10885 char file[256];
10886
10887 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10888 tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
10889
10890 return parse_uint_from_file(file, "%d\n");
10891}
10892
10893static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10894 const char *kfunc_name, size_t offset, int pid)
10895{
10896 const size_t attr_sz = sizeof(struct perf_event_attr);
10897 struct perf_event_attr attr;
10898 char errmsg[STRERR_BUFSIZE];
10899 int type, pfd, err;
10900
10901 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
10902 if (err < 0) {
10903 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
10904 kfunc_name, offset,
10905 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10906 return err;
10907 }
10908 type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
10909 if (type < 0) {
10910 err = type;
10911 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
10912 kfunc_name, offset,
10913 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10914 goto err_clean_legacy;
10915 }
10916
10917 memset(&attr, 0, attr_sz);
10918 attr.size = attr_sz;
10919 attr.config = type;
10920 attr.type = PERF_TYPE_TRACEPOINT;
10921
10922 pfd = syscall(__NR_perf_event_open, &attr,
10923 pid < 0 ? -1 : pid, /* pid */
10924 pid == -1 ? 0 : -1, /* cpu */
10925 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10926 if (pfd < 0) {
10927 err = -errno;
10928 pr_warn("legacy kprobe perf_event_open() failed: %s\n",
10929 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10930 goto err_clean_legacy;
10931 }
10932 return pfd;
10933
10934err_clean_legacy:
10935 /* Clear the newly added legacy kprobe_event */
10936 remove_kprobe_event_legacy(probe_name, retprobe);
10937 return err;
10938}
10939
10940static const char *arch_specific_syscall_pfx(void)
10941{
10942#if defined(__x86_64__)
10943 return "x64";
10944#elif defined(__i386__)
10945 return "ia32";
10946#elif defined(__s390x__)
10947 return "s390x";
10948#elif defined(__s390__)
10949 return "s390";
10950#elif defined(__arm__)
10951 return "arm";
10952#elif defined(__aarch64__)
10953 return "arm64";
10954#elif defined(__mips__)
10955 return "mips";
10956#elif defined(__riscv)
10957 return "riscv";
10958#elif defined(__powerpc__)
10959 return "powerpc";
10960#elif defined(__powerpc64__)
10961 return "powerpc64";
10962#else
10963 return NULL;
10964#endif
10965}
10966
10967int probe_kern_syscall_wrapper(int token_fd)
10968{
10969 char syscall_name[64];
10970 const char *ksys_pfx;
10971
10972 ksys_pfx = arch_specific_syscall_pfx();
10973 if (!ksys_pfx)
10974 return 0;
10975
10976 snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
10977
10978 if (determine_kprobe_perf_type() >= 0) {
10979 int pfd;
10980
10981 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
10982 if (pfd >= 0)
10983 close(pfd);
10984
10985 return pfd >= 0 ? 1 : 0;
10986 } else { /* legacy mode */
10987 char probe_name[128];
10988
10989 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
10990 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
10991 return 0;
10992
10993 (void)remove_kprobe_event_legacy(probe_name, false);
10994 return 1;
10995 }
10996}
10997
10998struct bpf_link *
10999bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
11000 const char *func_name,
11001 const struct bpf_kprobe_opts *opts)
11002{
11003 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11004 enum probe_attach_mode attach_mode;
11005 char errmsg[STRERR_BUFSIZE];
11006 char *legacy_probe = NULL;
11007 struct bpf_link *link;
11008 size_t offset;
11009 bool retprobe, legacy;
11010 int pfd, err;
11011
11012 if (!OPTS_VALID(opts, bpf_kprobe_opts))
11013 return libbpf_err_ptr(-EINVAL);
11014
11015 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
11016 retprobe = OPTS_GET(opts, retprobe, false);
11017 offset = OPTS_GET(opts, offset, 0);
11018 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11019
11020 legacy = determine_kprobe_perf_type() < 0;
11021 switch (attach_mode) {
11022 case PROBE_ATTACH_MODE_LEGACY:
11023 legacy = true;
11024 pe_opts.force_ioctl_attach = true;
11025 break;
11026 case PROBE_ATTACH_MODE_PERF:
11027 if (legacy)
11028 return libbpf_err_ptr(-ENOTSUP);
11029 pe_opts.force_ioctl_attach = true;
11030 break;
11031 case PROBE_ATTACH_MODE_LINK:
11032 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11033 return libbpf_err_ptr(-ENOTSUP);
11034 break;
11035 case PROBE_ATTACH_MODE_DEFAULT:
11036 break;
11037 default:
11038 return libbpf_err_ptr(-EINVAL);
11039 }
11040
11041 if (!legacy) {
11042 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
11043 func_name, offset,
11044 -1 /* pid */, 0 /* ref_ctr_off */);
11045 } else {
11046 char probe_name[256];
11047
11048 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
11049 func_name, offset);
11050
11051 legacy_probe = strdup(probe_name);
11052 if (!legacy_probe)
11053 return libbpf_err_ptr(-ENOMEM);
11054
11055 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
11056 offset, -1 /* pid */);
11057 }
11058 if (pfd < 0) {
11059 err = -errno;
11060 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
11061 prog->name, retprobe ? "kretprobe" : "kprobe",
11062 func_name, offset,
11063 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11064 goto err_out;
11065 }
11066 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11067 err = libbpf_get_error(link);
11068 if (err) {
11069 close(pfd);
11070 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
11071 prog->name, retprobe ? "kretprobe" : "kprobe",
11072 func_name, offset,
11073 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11074 goto err_clean_legacy;
11075 }
11076 if (legacy) {
11077 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11078
11079 perf_link->legacy_probe_name = legacy_probe;
11080 perf_link->legacy_is_kprobe = true;
11081 perf_link->legacy_is_retprobe = retprobe;
11082 }
11083
11084 return link;
11085
11086err_clean_legacy:
11087 if (legacy)
11088 remove_kprobe_event_legacy(legacy_probe, retprobe);
11089err_out:
11090 free(legacy_probe);
11091 return libbpf_err_ptr(err);
11092}
11093
11094struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
11095 bool retprobe,
11096 const char *func_name)
11097{
11098 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
11099 .retprobe = retprobe,
11100 );
11101
11102 return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
11103}
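
/* A usage sketch for the opts variant (function name and offset are
 * hypothetical):
 *
 *	LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *		.offset = 0x10,
 *		.attach_mode = PROBE_ATTACH_MODE_LEGACY,
 *	);
 *	link = bpf_program__attach_kprobe_opts(prog, "do_sys_open", &opts);
 */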
11104
11105struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
11106 const char *syscall_name,
11107 const struct bpf_ksyscall_opts *opts)
11108{
11109 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
11110 char func_name[128];
11111
11112 if (!OPTS_VALID(opts, bpf_ksyscall_opts))
11113 return libbpf_err_ptr(-EINVAL);
11114
11115 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
11116 /* arch_specific_syscall_pfx() should never return NULL here
11117 * because it is guarded by kernel_supports(). However, since
11118 * compiler does not know that we have an explicit conditional
11119 * as well.
11120 */
11121 snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
11122 arch_specific_syscall_pfx() ? : "", syscall_name);
11123 } else {
11124 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
11125 }
11126
11127 kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
11128 kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11129
11130 return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
11131}
11132
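/* For example, for syscall_name "close" on x86-64,
 * bpf_program__attach_ksyscall() above resolves to kprobing
 * "__x64_sys_close" on kernels with syscall wrappers, and
 * "__se_sys_close" otherwise.
 */
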
/* Adapted from perf/util/string.c */
bool glob_match(const char *str, const char *pat)
{
	while (*str && *pat && *pat != '*') {
		if (*pat == '?') { /* Matches any single character */
			str++;
			pat++;
			continue;
		}
		if (*str != *pat)
			return false;
		str++;
		pat++;
	}
	/* Check wild card */
	if (*pat == '*') {
		while (*pat == '*')
			pat++;
		if (!*pat) /* Tail wild card matches all */
			return true;
		while (*str)
			if (glob_match(str++, pat))
				return true;
	}
	return !*str && !*pat;
}

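/* E.g., glob_match("bpf_map_update_elem", "bpf_map_*") is true, while
 * glob_match("bpf_map_update_elem", "bpf_map_?") is false, since '?'
 * matches exactly one character; a pattern without wildcards must
 * match the whole string.
 */
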
struct kprobe_multi_resolve {
	const char *pattern;
	unsigned long *addrs;
	size_t cap;
	size_t cnt;
};

struct avail_kallsyms_data {
	char **syms;
	size_t cnt;
	struct kprobe_multi_resolve *res;
};

static int avail_func_cmp(const void *a, const void *b)
{
	return strcmp(*(const char **)a, *(const char **)b);
}

static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
			     const char *sym_name, void *ctx)
{
	struct avail_kallsyms_data *data = ctx;
	struct kprobe_multi_resolve *res = data->res;
	int err;

	if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
		return 0;

	err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
	if (err)
		return err;

	res->addrs[res->cnt++] = (unsigned long)sym_addr;
	return 0;
}

static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
{
	const char *available_functions_file = tracefs_available_filter_functions();
	struct avail_kallsyms_data data;
	char sym_name[500];
	FILE *f;
	int err = 0, ret, i;
	char **syms = NULL;
	size_t cap = 0, cnt = 0;

	f = fopen(available_functions_file, "re");
	if (!f) {
		err = -errno;
		pr_warn("failed to open %s: %d\n", available_functions_file, err);
		return err;
	}

	while (true) {
		char *name;

		ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
		if (ret == EOF && feof(f))
			break;

		if (ret != 1) {
			pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
			err = -EINVAL;
			goto cleanup;
		}

		if (!glob_match(sym_name, res->pattern))
			continue;

		err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
		if (err)
			goto cleanup;

		name = strdup(sym_name);
		if (!name) {
			err = -errno;
			goto cleanup;
		}

		syms[cnt++] = name;
	}

	/* no entries found, bail out */
	if (cnt == 0) {
		err = -ENOENT;
		goto cleanup;
	}

	/* sort available functions */
	qsort(syms, cnt, sizeof(*syms), avail_func_cmp);

	data.syms = syms;
	data.res = res;
	data.cnt = cnt;
	libbpf_kallsyms_parse(avail_kallsyms_cb, &data);

	if (res->cnt == 0)
		err = -ENOENT;

cleanup:
	for (i = 0; i < cnt; i++)
		free((char *)syms[i]);
	free(syms);

	fclose(f);
	return err;
}

static bool has_available_filter_functions_addrs(void)
{
	return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
}

static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
{
	const char *available_path = tracefs_available_filter_functions_addrs();
	char sym_name[500];
	FILE *f;
	int ret, err = 0;
	unsigned long long sym_addr;

	f = fopen(available_path, "re");
	if (!f) {
		err = -errno;
		pr_warn("failed to open %s: %d\n", available_path, err);
		return err;
	}

	while (true) {
		ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
		if (ret == EOF && feof(f))
			break;

		if (ret != 2) {
			pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
				ret);
			err = -EINVAL;
			goto cleanup;
		}

		if (!glob_match(sym_name, res->pattern))
			continue;

		err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
					sizeof(*res->addrs), res->cnt + 1);
		if (err)
			goto cleanup;

		res->addrs[res->cnt++] = (unsigned long)sym_addr;
	}

	if (res->cnt == 0)
		err = -ENOENT;

cleanup:
	fclose(f);
	return err;
}

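/* Both resolvers above expand a glob pattern into a list of kernel
 * addresses: libbpf_available_kprobes_parse() reads addresses directly
 * from available_filter_functions_addrs, while the fallback collects
 * matching names from available_filter_functions and cross-references
 * them against kallsyms via libbpf_kallsyms_parse().
 */
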
struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
				      const char *pattern,
				      const struct bpf_kprobe_multi_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, lopts);
	struct kprobe_multi_resolve res = {
		.pattern = pattern,
	};
	struct bpf_link *link = NULL;
	char errmsg[STRERR_BUFSIZE];
	const unsigned long *addrs;
	int err, link_fd, prog_fd;
	const __u64 *cookies;
	const char **syms;
	bool retprobe;
	size_t cnt;

	if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
		return libbpf_err_ptr(-EINVAL);

	syms = OPTS_GET(opts, syms, NULL);
	addrs = OPTS_GET(opts, addrs, NULL);
	cnt = OPTS_GET(opts, cnt, 0);
	cookies = OPTS_GET(opts, cookies, NULL);

	if (!pattern && !addrs && !syms)
		return libbpf_err_ptr(-EINVAL);
	if (pattern && (addrs || syms || cookies || cnt))
		return libbpf_err_ptr(-EINVAL);
	if (!pattern && !cnt)
		return libbpf_err_ptr(-EINVAL);
	if (addrs && syms)
		return libbpf_err_ptr(-EINVAL);

	if (pattern) {
		if (has_available_filter_functions_addrs())
			err = libbpf_available_kprobes_parse(&res);
		else
			err = libbpf_available_kallsyms_parse(&res);
		if (err)
			goto error;
		addrs = res.addrs;
		cnt = res.cnt;
	}

	retprobe = OPTS_GET(opts, retprobe, false);

	lopts.kprobe_multi.syms = syms;
	lopts.kprobe_multi.addrs = addrs;
	lopts.kprobe_multi.cookies = cookies;
	lopts.kprobe_multi.cnt = cnt;
	lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;

	link = calloc(1, sizeof(*link));
	if (!link) {
		err = -ENOMEM;
		goto error;
	}
	link->detach = &bpf_link__detach_fd;

	prog_fd = bpf_program__fd(prog);
	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
	if (link_fd < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to attach: %s\n",
			prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto error;
	}
	link->fd = link_fd;
	free(res.addrs);
	return link;

error:
	free(link);
	free(res.addrs);
	return libbpf_err_ptr(err);
}

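/* A minimal usage sketch of the pattern-based variant of
 * bpf_program__attach_kprobe_multi_opts() above (pattern is
 * illustrative):
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .retprobe = true);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "bpf_map_*", &opts);
 *
 * Alternatively, pass a NULL pattern and fill opts.syms (or opts.addrs,
 * but not both) together with opts.cnt.
 */
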
static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	unsigned long offset = 0;
	const char *func_name;
	char *func;
	int n;

	*link = NULL;

	/* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
	if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
		return 0;

	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
	if (opts.retprobe)
		func_name = prog->sec_name + sizeof("kretprobe/") - 1;
	else
		func_name = prog->sec_name + sizeof("kprobe/") - 1;

	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
	if (n < 1) {
		pr_warn("kprobe name is invalid: %s\n", func_name);
		return -EINVAL;
	}
	if (opts.retprobe && offset != 0) {
		free(func);
		pr_warn("kretprobes do not support offset specification\n");
		return -EINVAL;
	}

	opts.offset = offset;
	*link = bpf_program__attach_kprobe_opts(prog, func, &opts);
	free(func);
	return libbpf_get_error(*link);
}

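/* attach_kprobe() above accepts SEC("kprobe/<func>"),
 * SEC("kprobe/<func>+<offset>") and SEC("kretprobe/<func>"), e.g.
 * SEC("kprobe/do_unlinkat") or SEC("kprobe/do_unlinkat+0x10");
 * kretprobes do not take an offset.
 */
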
static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	LIBBPF_OPTS(bpf_ksyscall_opts, opts);
	const char *syscall_name;

	*link = NULL;

	/* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
	if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
		return 0;

	opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
	if (opts.retprobe)
		syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
	else
		syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;

	*link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
	return *link ? 0 : -errno;
}

static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	const char *spec;
	char *pattern;
	int n;

	*link = NULL;

	/* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
	if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
	    strcmp(prog->sec_name, "kretprobe.multi") == 0)
		return 0;

	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
	if (opts.retprobe)
		spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
	else
		spec = prog->sec_name + sizeof("kprobe.multi/") - 1;

	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
	if (n < 1) {
		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
		return -EINVAL;
	}

	*link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
	free(pattern);
	return libbpf_get_error(*link);
}

static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	int n, ret = -EINVAL;

	*link = NULL;

	n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
		   &probe_type, &binary_path, &func_name);
	switch (n) {
	case 1:
		/* handle SEC("u[ret]probe.multi") - format is valid, but auto-attach is impossible. */
		ret = 0;
		break;
	case 3:
		opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
		*link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
		ret = libbpf_get_error(*link);
		break;
	default:
		pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
			prog->sec_name);
		break;
	}
	free(probe_type);
	free(binary_path);
	free(func_name);
	return ret;
}

static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
					 const char *binary_path, uint64_t offset)
{
	int i;

	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);

	/* sanitize binary_path in the probe name */
	for (i = 0; buf[i]; i++) {
		if (!isalnum(buf[i]))
			buf[i] = '_';
	}
}

static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
					  const char *binary_path, size_t offset)
{
	return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
			      retprobe ? 'r' : 'p',
			      retprobe ? "uretprobes" : "uprobes",
			      probe_name, binary_path, offset);
}

static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
{
	return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
			      retprobe ? "uretprobes" : "uprobes", probe_name);
}

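/* The helpers above drive the legacy tracefs interface by appending
 * control lines to <tracefs>/uprobe_events, e.g. (probe name
 * illustrative):
 *
 *	p:uprobes/libbpf_1234__bin_bash_0x1234 /bin/bash:0x1234
 *	-:uprobes/libbpf_1234__bin_bash_0x1234
 */
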
static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
{
	char file[512];

	snprintf(file, sizeof(file), "%s/events/%s/%s/id",
		 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);

	return parse_uint_from_file(file, "%d\n");
}

static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
					 const char *binary_path, size_t offset, int pid)
{
	const size_t attr_sz = sizeof(struct perf_event_attr);
	struct perf_event_attr attr;
	int type, pfd, err;

	err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
	if (err < 0) {
		pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
			binary_path, (size_t)offset, err);
		return err;
	}
	type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
	if (type < 0) {
		err = type;
		pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
			binary_path, offset, err);
		goto err_clean_legacy;
	}

	memset(&attr, 0, attr_sz);
	attr.size = attr_sz;
	attr.config = type;
	attr.type = PERF_TYPE_TRACEPOINT;

	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid, /* pid */
		      pid == -1 ? 0 : -1, /* cpu */
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
		goto err_clean_legacy;
	}
	return pfd;

err_clean_legacy:
	/* Clear the newly added legacy uprobe_event */
	remove_uprobe_event_legacy(probe_name, retprobe);
	return err;
}

/* Find offset of function name in archive specified by path. Currently
 * supported are .zip files that do not compress their contents, as used on
 * Android in the form of APKs, for example. "file_name" is the name of the ELF
 * file inside the archive. "func_name" matches symbol name or name@@LIB for
 * library functions.
 *
 * An overview of the APK format is provided here:
 * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
 */
static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
					      const char *func_name)
{
	struct zip_archive *archive;
	struct zip_entry entry;
	long ret;
	Elf *elf;

	archive = zip_archive_open(archive_path);
	if (IS_ERR(archive)) {
		ret = PTR_ERR(archive);
		pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
		return ret;
	}

	ret = zip_archive_find_entry(archive, file_name, &entry);
	if (ret) {
		pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
			archive_path, ret);
		goto out;
	}
	pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
		 (unsigned long)entry.data_offset);

	if (entry.compression) {
		pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
			archive_path);
		ret = -LIBBPF_ERRNO__FORMAT;
		goto out;
	}

	elf = elf_memory((void *)entry.data, entry.data_length);
	if (!elf) {
		pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
			elf_errmsg(-1));
		ret = -LIBBPF_ERRNO__LIBELF;
		goto out;
	}

	ret = elf_find_func_offset(elf, file_name, func_name);
	if (ret > 0) {
		pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
			 func_name, file_name, archive_path, entry.data_offset, ret,
			 ret + entry.data_offset);
		ret += entry.data_offset;
	}
	elf_end(elf);

out:
	zip_archive_close(archive);
	return ret;
}

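/* E.g., an Android caller may pass
 * "/data/app/com.example/base.apk!/lib/arm64-v8a/libfoo.so" (path is
 * illustrative), which bpf_program__attach_uprobe_opts() below splits
 * at "!/" into the archive path and the in-archive file name; only
 * stored (uncompressed) zip entries can be handled.
 */
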
static const char *arch_specific_lib_paths(void)
{
	/*
	 * Based on https://packages.debian.org/sid/libc6.
	 *
	 * Assume that the traced program is built for the same architecture
	 * as libbpf, which should cover the vast majority of cases.
	 */
#if defined(__x86_64__)
	return "/lib/x86_64-linux-gnu";
#elif defined(__i386__)
	return "/lib/i386-linux-gnu";
#elif defined(__s390x__)
	return "/lib/s390x-linux-gnu";
#elif defined(__s390__)
	return "/lib/s390-linux-gnu";
#elif defined(__arm__) && defined(__SOFTFP__)
	return "/lib/arm-linux-gnueabi";
#elif defined(__arm__) && !defined(__SOFTFP__)
	return "/lib/arm-linux-gnueabihf";
#elif defined(__aarch64__)
	return "/lib/aarch64-linux-gnu";
#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
	return "/lib/mips64el-linux-gnuabi64";
#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
	return "/lib/mipsel-linux-gnu";
#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return "/lib/powerpc64le-linux-gnu";
#elif defined(__sparc__) && defined(__arch64__)
	return "/lib/sparc64-linux-gnu";
#elif defined(__riscv) && __riscv_xlen == 64
	return "/lib/riscv64-linux-gnu";
#else
	return NULL;
#endif
}

/* Get full path to program/shared library. */
static int resolve_full_path(const char *file, char *result, size_t result_sz)
{
	const char *search_paths[3] = {};
	int i, perm;

	if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
		search_paths[0] = getenv("LD_LIBRARY_PATH");
		search_paths[1] = "/usr/lib64:/usr/lib";
		search_paths[2] = arch_specific_lib_paths();
		perm = R_OK;
	} else {
		search_paths[0] = getenv("PATH");
		search_paths[1] = "/usr/bin:/usr/sbin";
		perm = R_OK | X_OK;
	}

	for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
		const char *s;

		if (!search_paths[i])
			continue;
		for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
			char *next_path;
			int seg_len;

			if (s[0] == ':')
				s++;
			next_path = strchr(s, ':');
			seg_len = next_path ? next_path - s : strlen(s);
			if (!seg_len)
				continue;
			snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
			/* ensure it has required permissions */
			if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
				continue;
			pr_debug("resolved '%s' to '%s'\n", file, result);
			return 0;
		}
	}
	return -ENOENT;
}

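/* E.g., resolve_full_path() above turns "libc.so.6" into something like
 * "/lib/x86_64-linux-gnu/libc.so.6" by scanning LD_LIBRARY_PATH,
 * /usr/lib64:/usr/lib and the arch-specific directory, while plain
 * binaries like "bash" are looked up in PATH and /usr/bin:/usr/sbin
 * with execute permission required.
 */
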
struct bpf_link *
bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
				 pid_t pid,
				 const char *path,
				 const char *func_pattern,
				 const struct bpf_uprobe_multi_opts *opts)
{
	const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
	LIBBPF_OPTS(bpf_link_create_opts, lopts);
	unsigned long *resolved_offsets = NULL;
	int err = 0, link_fd, prog_fd;
	struct bpf_link *link = NULL;
	char errmsg[STRERR_BUFSIZE];
	char full_path[PATH_MAX];
	const __u64 *cookies;
	const char **syms;
	size_t cnt;

	if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
		return libbpf_err_ptr(-EINVAL);

	syms = OPTS_GET(opts, syms, NULL);
	offsets = OPTS_GET(opts, offsets, NULL);
	ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
	cookies = OPTS_GET(opts, cookies, NULL);
	cnt = OPTS_GET(opts, cnt, 0);

	/*
	 * The user can specify two mutually exclusive sets of inputs:
	 *
	 * 1) use only path/func_pattern/pid arguments
	 *
	 * 2) use path/pid with allowed combinations of:
	 *    syms/offsets/ref_ctr_offsets/cookies/cnt
	 *
	 *    - syms and offsets are mutually exclusive
	 *    - ref_ctr_offsets and cookies are optional
	 *
	 * Any other usage results in error.
	 */

	if (!path)
		return libbpf_err_ptr(-EINVAL);
	if (!func_pattern && cnt == 0)
		return libbpf_err_ptr(-EINVAL);

	if (func_pattern) {
		if (syms || offsets || ref_ctr_offsets || cookies || cnt)
			return libbpf_err_ptr(-EINVAL);
	} else {
		if (!!syms == !!offsets)
			return libbpf_err_ptr(-EINVAL);
	}

	if (func_pattern) {
		if (!strchr(path, '/')) {
			err = resolve_full_path(path, full_path, sizeof(full_path));
			if (err) {
				pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
					prog->name, path, err);
				return libbpf_err_ptr(err);
			}
			path = full_path;
		}

		err = elf_resolve_pattern_offsets(path, func_pattern,
						  &resolved_offsets, &cnt);
		if (err < 0)
			return libbpf_err_ptr(err);
		offsets = resolved_offsets;
	} else if (syms) {
		err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
		if (err < 0)
			return libbpf_err_ptr(err);
		offsets = resolved_offsets;
	}

	lopts.uprobe_multi.path = path;
	lopts.uprobe_multi.offsets = offsets;
	lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
	lopts.uprobe_multi.cookies = cookies;
	lopts.uprobe_multi.cnt = cnt;
	lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;

	if (pid == 0)
		pid = getpid();
	if (pid > 0)
		lopts.uprobe_multi.pid = pid;

	link = calloc(1, sizeof(*link));
	if (!link) {
		err = -ENOMEM;
		goto error;
	}
	link->detach = &bpf_link__detach_fd;

	prog_fd = bpf_program__fd(prog);
	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
	if (link_fd < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
			prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto error;
	}
	link->fd = link_fd;
	free(resolved_offsets);
	return link;

error:
	free(resolved_offsets);
	free(link);
	return libbpf_err_ptr(err);
}

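/* A minimal usage sketch of bpf_program__attach_uprobe_multi() above
 * (binary and pattern are illustrative):
 *
 *	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe_multi(prog, -1, "/usr/bin/bash",
 *						"read*", &opts);
 *
 * pid == -1 attaches to any process; pid == 0 is replaced with the
 * caller's own PID.
 */
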
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts)
{
	const char *archive_path = NULL, *archive_sep = NULL;
	char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
	enum probe_attach_mode attach_mode;
	char full_path[PATH_MAX];
	struct bpf_link *link;
	size_t ref_ctr_off;
	int pfd, err;
	bool retprobe, legacy;
	const char *func_name;

	if (!OPTS_VALID(opts, bpf_uprobe_opts))
		return libbpf_err_ptr(-EINVAL);

	attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
	retprobe = OPTS_GET(opts, retprobe, false);
	ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);

	if (!binary_path)
		return libbpf_err_ptr(-EINVAL);

	/* Check if "binary_path" refers to an archive. */
	archive_sep = strstr(binary_path, "!/");
	if (archive_sep) {
		full_path[0] = '\0';
		libbpf_strlcpy(full_path, binary_path,
			       min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
		archive_path = full_path;
		binary_path = archive_sep + 2;
	} else if (!strchr(binary_path, '/')) {
		err = resolve_full_path(binary_path, full_path, sizeof(full_path));
		if (err) {
			pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
				prog->name, binary_path, err);
			return libbpf_err_ptr(err);
		}
		binary_path = full_path;
	}
	func_name = OPTS_GET(opts, func_name, NULL);
	if (func_name) {
		long sym_off;

		if (archive_path) {
			sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
								    func_name);
			binary_path = archive_path;
		} else {
			sym_off = elf_find_func_offset_from_file(binary_path, func_name);
		}
		if (sym_off < 0)
			return libbpf_err_ptr(sym_off);
		func_offset += sym_off;
	}

	legacy = determine_uprobe_perf_type() < 0;
	switch (attach_mode) {
	case PROBE_ATTACH_MODE_LEGACY:
		legacy = true;
		pe_opts.force_ioctl_attach = true;
		break;
	case PROBE_ATTACH_MODE_PERF:
		if (legacy)
			return libbpf_err_ptr(-ENOTSUP);
		pe_opts.force_ioctl_attach = true;
		break;
	case PROBE_ATTACH_MODE_LINK:
		if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
			return libbpf_err_ptr(-ENOTSUP);
		break;
	case PROBE_ATTACH_MODE_DEFAULT:
		break;
	default:
		return libbpf_err_ptr(-EINVAL);
	}

	if (!legacy) {
		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
					    func_offset, pid, ref_ctr_off);
	} else {
		char probe_name[PATH_MAX + 64];

		if (ref_ctr_off)
			return libbpf_err_ptr(-EINVAL);

		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
					     binary_path, func_offset);

		legacy_probe = strdup(probe_name);
		if (!legacy_probe)
			return libbpf_err_ptr(-ENOMEM);

		pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
						    binary_path, func_offset, pid);
	}
	if (pfd < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}

	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	err = libbpf_get_error(link);
	if (err) {
		close(pfd);
		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_clean_legacy;
	}
	if (legacy) {
		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);

		perf_link->legacy_probe_name = legacy_probe;
		perf_link->legacy_is_kprobe = false;
		perf_link->legacy_is_retprobe = retprobe;
	}
	return link;

err_clean_legacy:
	if (legacy)
		remove_uprobe_event_legacy(legacy_probe, retprobe);
err_out:
	free(legacy_probe);
	return libbpf_err_ptr(err);
}

/* Format of u[ret]probe section definition supporting auto-attach:
 * u[ret]probe/binary:function[+offset]
 *
 * binary can be an absolute/relative path or a filename; the latter is resolved to a
 * full binary path via bpf_program__attach_uprobe_opts.
 *
 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
 * specified (and auto-attach is not possible) or the above format is specified for
 * auto-attach.
 */
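/* E.g. SEC("uprobe//usr/bin/bash:readline"),
 * SEC("uprobe//usr/bin/bash:readline+0x10") or
 * SEC("uretprobe//usr/bin/bash:readline") (uretprobes do not take an
 * offset).
 */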
static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
	char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off;
	int n, c, ret = -EINVAL;
	long offset = 0;

	*link = NULL;

	n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
		   &probe_type, &binary_path, &func_name);
	switch (n) {
	case 1:
		/* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
		ret = 0;
		break;
	case 2:
		pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
			prog->name, prog->sec_name);
		break;
	case 3:
		/* check if user specifies `+offset`, if yes, this should be
		 * the last part of the string, make sure sscanf read to EOL
		 */
		func_off = strrchr(func_name, '+');
		if (func_off) {
			n = sscanf(func_off, "+%li%n", &offset, &c);
			if (n == 1 && *(func_off + c) == '\0')
				func_off[0] = '\0';
			else
				offset = 0;
		}
		opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
				strcmp(probe_type, "uretprobe.s") == 0;
		if (opts.retprobe && offset != 0) {
			pr_warn("prog '%s': uretprobes do not support offset specification\n",
				prog->name);
			break;
		}
		opts.func_name = func_name;
		*link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
		ret = libbpf_get_error(*link);
		break;
	default:
		pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
			prog->sec_name);
		break;
	}
	free(probe_type);
	free(binary_path);
	free(func_name);

	return ret;
}

struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
					    bool retprobe, pid_t pid,
					    const char *binary_path,
					    size_t func_offset)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);

	return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
}

struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
					  pid_t pid, const char *binary_path,
					  const char *usdt_provider, const char *usdt_name,
					  const struct bpf_usdt_opts *opts)
{
	char resolved_path[512];
	struct bpf_object *obj = prog->obj;
	struct bpf_link *link;
	__u64 usdt_cookie;
	int err;

	if (!OPTS_VALID(opts, bpf_usdt_opts))
		return libbpf_err_ptr(-EINVAL);

	if (bpf_program__fd(prog) < 0) {
		pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	if (!binary_path)
		return libbpf_err_ptr(-EINVAL);

	if (!strchr(binary_path, '/')) {
		err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
		if (err) {
			pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
				prog->name, binary_path, err);
			return libbpf_err_ptr(err);
		}
		binary_path = resolved_path;
	}

	/* USDT manager is instantiated lazily on first USDT attach. It will
	 * be destroyed together with the BPF object in bpf_object__close().
	 */
	if (IS_ERR(obj->usdt_man))
		return libbpf_ptr(obj->usdt_man);
	if (!obj->usdt_man) {
		obj->usdt_man = usdt_manager_new(obj);
		if (IS_ERR(obj->usdt_man))
			return libbpf_ptr(obj->usdt_man);
	}

	usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
	link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
					usdt_provider, usdt_name, usdt_cookie);
	err = libbpf_get_error(link);
	if (err)
		return libbpf_err_ptr(err);
	return link;
}

static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	char *path = NULL, *provider = NULL, *name = NULL;
	const char *sec_name;
	int n, err;

	sec_name = bpf_program__section_name(prog);
	if (strcmp(sec_name, "usdt") == 0) {
		/* no auto-attach for just SEC("usdt") */
		*link = NULL;
		return 0;
	}

	n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
	if (n != 3) {
		pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
			sec_name);
		err = -EINVAL;
	} else {
		*link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
						 provider, name, NULL);
		err = libbpf_get_error(*link);
	}
	free(path);
	free(provider);
	free(name);
	return err;
}

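/* The auto-attach section format handled above is
 * SEC("usdt/<path>:<provider>:<name>"), e.g. (probe is illustrative)
 * SEC("usdt/libc.so.6:libc:setjmp").
 */
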
static int determine_tracepoint_id(const char *tp_category,
				   const char *tp_name)
{
	char file[PATH_MAX];
	int ret;

	ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
		       tracefs_path(), tp_category, tp_name);
	if (ret < 0)
		return -errno;
	if (ret >= sizeof(file)) {
		pr_debug("tracepoint %s/%s path is too long\n",
			 tp_category, tp_name);
		return -E2BIG;
	}
	return parse_uint_from_file(file, "%d\n");
}

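/* E.g., for ("sched", "sched_switch") determine_tracepoint_id() above
 * reads something like /sys/kernel/tracing/events/sched/sched_switch/id,
 * depending on where tracefs is mounted.
 */
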
static int perf_event_open_tracepoint(const char *tp_category,
				      const char *tp_name)
{
	const size_t attr_sz = sizeof(struct perf_event_attr);
	struct perf_event_attr attr;
	char errmsg[STRERR_BUFSIZE];
	int tp_id, pfd, err;

	tp_id = determine_tracepoint_id(tp_category, tp_name);
	if (tp_id < 0) {
		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
		return tp_id;
	}

	memset(&attr, 0, attr_sz);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = attr_sz;
	attr.config = tp_id;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
						     const char *tp_category,
						     const char *tp_name,
						     const struct bpf_tracepoint_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	if (!OPTS_VALID(opts, bpf_tracepoint_opts))
		return libbpf_err_ptr(-EINVAL);

	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);

	pfd = perf_event_open_tracepoint(tp_category, tp_name);
	if (pfd < 0) {
		pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
			prog->name, tp_category, tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(pfd);
	}
	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	err = libbpf_get_error(link);
	if (err) {
		close(pfd);
		pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
			prog->name, tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(err);
	}
	return link;
}

struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
}

static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	char *sec_name, *tp_cat, *tp_name;

	*link = NULL;

	/* no auto-attach for SEC("tp") or SEC("tracepoint") */
	if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
		return 0;

	sec_name = strdup(prog->sec_name);
	if (!sec_name)
		return -ENOMEM;

	/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
	if (str_has_pfx(prog->sec_name, "tp/"))
		tp_cat = sec_name + sizeof("tp/") - 1;
	else
		tp_cat = sec_name + sizeof("tracepoint/") - 1;
	tp_name = strchr(tp_cat, '/');
	if (!tp_name) {
		free(sec_name);
		return -EINVAL;
	}
	*tp_name = '\0';
	tp_name++;

	*link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
	free(sec_name);
	return libbpf_get_error(*link);
}

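/* attach_tp() above handles, e.g., SEC("tp/syscalls/sys_enter_openat")
 * and SEC("tracepoint/sched/sched_switch"): the first path segment is
 * the tracepoint category, the rest is the tracepoint name.
 */
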
struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
						    const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
			prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(pfd);
	}
	link->fd = pfd;
	return link;
}

static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	static const char *const prefixes[] = {
		"raw_tp",
		"raw_tracepoint",
		"raw_tp.w",
		"raw_tracepoint.w",
	};
	size_t i;
	const char *tp_name = NULL;

	*link = NULL;

	for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
		size_t pfx_len;

		if (!str_has_pfx(prog->sec_name, prefixes[i]))
			continue;

		pfx_len = strlen(prefixes[i]);
		/* no auto-attach case of, e.g., SEC("raw_tp") */
		if (prog->sec_name[pfx_len] == '\0')
			return 0;

		if (prog->sec_name[pfx_len] != '/')
			continue;

		tp_name = prog->sec_name + pfx_len + 1;
		break;
	}

	if (!tp_name) {
		pr_warn("prog '%s': invalid section name '%s'\n",
			prog->name, prog->sec_name);
		return -EINVAL;
	}

	*link = bpf_program__attach_raw_tracepoint(prog, tp_name);
	return libbpf_get_error(*link);
}

/* Common logic for all BPF program types that attach to a btf_id */
static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
						   const struct bpf_trace_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, pfd;

	if (!OPTS_VALID(opts, bpf_trace_opts))
		return libbpf_err_ptr(-EINVAL);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	/* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
	link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
	pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach: %s\n",
			prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(pfd);
	}
	link->fd = pfd;
	return link;
}

struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog, NULL);
}

struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
						const struct bpf_trace_opts *opts)
{
	return bpf_program__attach_btf_id(prog, opts);
}

struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog, NULL);
}

static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	*link = bpf_program__attach_trace(prog);
	return libbpf_get_error(*link);
}

static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	*link = bpf_program__attach_lsm(prog);
	return libbpf_get_error(*link);
}

static struct bpf_link *
bpf_program_attach_fd(const struct bpf_program *prog,
		      int target_fd, const char *target_name,
		      const struct bpf_link_create_opts *opts)
{
	enum bpf_attach_type attach_type;
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	attach_type = bpf_program__expected_attach_type(prog);
	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to %s: %s\n",
			prog->name, target_name,
			libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(link_fd);
	}
	link->fd = link_fd;
	return link;
}

struct bpf_link *
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
{
	return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
}

struct bpf_link *
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
{
	return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
}

struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
	return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
}

struct bpf_link *
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
			const struct bpf_tcx_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
	__u32 relative_id;
	int relative_fd;

	if (!OPTS_VALID(opts, bpf_tcx_opts))
		return libbpf_err_ptr(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (!ifindex) {
		pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}
	if (relative_fd && relative_id) {
		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
	link_create_opts.tcx.relative_fd = relative_fd;
	link_create_opts.tcx.relative_id = relative_id;
	link_create_opts.flags = OPTS_GET(opts, flags, 0);

	/* target_fd/target_ifindex use the same field in LINK_CREATE */
	return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
}

struct bpf_link *
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
			   const struct bpf_netkit_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
	__u32 relative_id;
	int relative_fd;

	if (!OPTS_VALID(opts, bpf_netkit_opts))
		return libbpf_err_ptr(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (!ifindex) {
		pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}
	if (relative_fd && relative_id) {
		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0);
	link_create_opts.netkit.relative_fd = relative_fd;
	link_create_opts.netkit.relative_id = relative_id;
	link_create_opts.flags = OPTS_GET(opts, flags, 0);

	return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts);
}

struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
					      int target_fd,
					      const char *attach_func_name)
{
	int btf_id;

	if (!!target_fd != !!attach_func_name) {
		pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	if (prog->type != BPF_PROG_TYPE_EXT) {
		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	if (target_fd) {
		LIBBPF_OPTS(bpf_link_create_opts, target_opts);

		btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
		if (btf_id < 0)
			return libbpf_err_ptr(btf_id);

		target_opts.target_btf_id = btf_id;

		return bpf_program_attach_fd(prog, target_fd, "freplace",
					     &target_opts);
	} else {
		/* no target, so use raw_tracepoint_open for compatibility
		 * with old kernels
		 */
		return bpf_program__attach_trace(prog);
	}
}

struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;
	__u32 target_fd = 0;

	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
		return libbpf_err_ptr(-EINVAL);

	link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
	link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
				  &link_create_opts);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to iterator: %s\n",
			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(link_fd);
	}
	link->fd = link_fd;
	return link;
}

static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	*link = bpf_program__attach_iter(prog, NULL);
	return libbpf_get_error(*link);
}

struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
					       const struct bpf_netfilter_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, lopts);
	struct bpf_link *link;
	int prog_fd, link_fd;

	if (!OPTS_VALID(opts, bpf_netfilter_opts))
		return libbpf_err_ptr(-EINVAL);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);

	link->detach = &bpf_link__detach_fd;

	lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
	lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
	lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
	lopts.netfilter.flags = OPTS_GET(opts, flags, 0);

	link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
	if (link_fd < 0) {
		char errmsg[STRERR_BUFSIZE];

		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to netfilter: %s\n",
			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(link_fd);
	}
	link->fd = link_fd;

	return link;
}

struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
{
	struct bpf_link *link = NULL;
	int err;

	if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
		return libbpf_err_ptr(-EOPNOTSUPP);

	err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
	if (err)
		return libbpf_err_ptr(err);

	/* When calling bpf_program__attach() explicitly, auto-attach support
	 * is expected to work, so NULL returned link is considered an error.
	 * This is different for skeleton's attach, see comment in
	 * bpf_object__attach_skeleton().
	 */
	if (!link)
		return libbpf_err_ptr(-EOPNOTSUPP);

	return link;
}

struct bpf_link_struct_ops {
	struct bpf_link link;
	int map_fd;
};

static int bpf_link__detach_struct_ops(struct bpf_link *link)
{
	struct bpf_link_struct_ops *st_link;
	__u32 zero = 0;

	st_link = container_of(link, struct bpf_link_struct_ops, link);

	if (st_link->map_fd < 0)
		/* w/o a real link */
		return bpf_map_delete_elem(link->fd, &zero);

	return close(link->fd);
}

struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
{
	struct bpf_link_struct_ops *link;
	__u32 zero = 0;
	int err, fd;

	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
		return libbpf_err_ptr(-EINVAL);

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);

	/* kern_vdata should be prepared during the loading phase. */
	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
	/* It can be -EBUSY if the map has been used to create or
	 * update a link before. We don't allow updating the value of
	 * a struct_ops map once it is set, which guarantees that the
	 * value never changes. So, it is safe to skip -EBUSY.
	 */
	if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
		free(link);
		return libbpf_err_ptr(err);
	}

	link->link.detach = bpf_link__detach_struct_ops;

	if (!(map->def.map_flags & BPF_F_LINK)) {
		/* w/o a real link */
		link->link.fd = map->fd;
		link->map_fd = -1;
		return &link->link;
	}

	fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
	if (fd < 0) {
		free(link);
		return libbpf_err_ptr(fd);
	}

	link->link.fd = fd;
	link->map_fd = map->fd;

	return &link->link;
}

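/* A minimal usage sketch of bpf_map__attach_struct_ops() above (the
 * skeleton map name is illustrative):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_map__attach_struct_ops(skel->maps.my_ops);
 *
 * For BPF_F_LINK maps this creates a real BPF link; otherwise the
 * returned link wraps the map FD, and detaching deletes the registered
 * struct_ops value from the map.
 */
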
/*
 * Swap the struct_ops map backing a link with a new struct_ops map.
 */
int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
{
	struct bpf_link_struct_ops *st_ops_link;
	__u32 zero = 0;
	int err;

	if (!bpf_map__is_struct_ops(map) || !map_is_created(map))
		return -EINVAL;

	st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
	/* Ensure the type of a link is correct */
	if (st_ops_link->map_fd < 0)
		return -EINVAL;

	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
	/* It can be -EBUSY if the map has been used to create or
	 * update a link before. We don't allow updating the value of
	 * a struct_ops map once it is set, which guarantees that the
	 * value never changes. So, it is safe to skip -EBUSY.
	 */
	if (err && err != -EBUSY)
		return err;

	err = bpf_link_update(link->fd, map->fd, NULL);
	if (err < 0)
		return err;

	st_ops_link->map_fd = map->fd;

	return 0;
}

typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
							   void *private_data);

static enum bpf_perf_event_ret
perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
		       void **copy_mem, size_t *copy_size,
		       bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_second = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_second);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return libbpf_err(ret);
}

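/* Note on perf_event_read_simple() above: the data area is mmap_size
 * bytes and mmap_size is a power of two, so (data_tail & (mmap_size - 1))
 * locates a record. A record that starts near the end of the area may
 * wrap around to the start; such records are reassembled into the
 * heap buffer *copy_mem before being handed to the callback.
 */
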
12830struct perf_buffer;
12831
12832struct perf_buffer_params {
12833 struct perf_event_attr *attr;
12834 /* if event_cb is specified, it takes precendence */
12835 perf_buffer_event_fn event_cb;
12836 /* sample_cb and lost_cb are higher-level common-case callbacks */
12837 perf_buffer_sample_fn sample_cb;
12838 perf_buffer_lost_fn lost_cb;
12839 void *ctx;
12840 int cpu_cnt;
12841 int *cpus;
12842 int *map_keys;
12843};
12844
12845struct perf_cpu_buf {
12846 struct perf_buffer *pb;
12847 void *base; /* mmap()'ed memory */
12848 void *buf; /* for reconstructing segmented data */
12849 size_t buf_size;
12850 int fd;
12851 int cpu;
12852 int map_key;
12853};
12854
12855struct perf_buffer {
12856 perf_buffer_event_fn event_cb;
12857 perf_buffer_sample_fn sample_cb;
12858 perf_buffer_lost_fn lost_cb;
12859 void *ctx; /* passed into callbacks */
12860
12861 size_t page_size;
12862 size_t mmap_size;
12863 struct perf_cpu_buf **cpu_bufs;
12864 struct epoll_event *events;
12865 int cpu_cnt; /* number of allocated CPU buffers */
12866 int epoll_fd; /* perf event FD */
12867 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
12868};
12869
12870static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
12871 struct perf_cpu_buf *cpu_buf)
12872{
12873 if (!cpu_buf)
12874 return;
12875 if (cpu_buf->base &&
12876 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
12877 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
12878 if (cpu_buf->fd >= 0) {
12879 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
12880 close(cpu_buf->fd);
12881 }
12882 free(cpu_buf->buf);
12883 free(cpu_buf);
12884}
12885
12886void perf_buffer__free(struct perf_buffer *pb)
12887{
12888 int i;
12889
12890 if (IS_ERR_OR_NULL(pb))
12891 return;
12892 if (pb->cpu_bufs) {
12893 for (i = 0; i < pb->cpu_cnt; i++) {
12894 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
12895
12896 if (!cpu_buf)
12897 continue;
12898
12899 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
12900 perf_buffer__free_cpu_buf(pb, cpu_buf);
12901 }
12902 free(pb->cpu_bufs);
12903 }
12904 if (pb->epoll_fd >= 0)
12905 close(pb->epoll_fd);
12906 free(pb->events);
12907 free(pb);
12908}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	/* map one extra leading page for the perf ring's metadata page */
	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     perf_buffer_sample_fn sample_cb,
				     perf_buffer_lost_fn lost_cb,
				     void *ctx,
				     const struct perf_buffer_opts *opts)
{
	const size_t attr_sz = sizeof(struct perf_event_attr);
	struct perf_buffer_params p = {};
	struct perf_event_attr attr;
	__u32 sample_period;

	if (!OPTS_VALID(opts, perf_buffer_opts))
		return libbpf_err_ptr(-EINVAL);

	sample_period = OPTS_GET(opts, sample_period, 1);
	if (!sample_period)
		sample_period = 1;

	memset(&attr, 0, attr_sz);
	attr.size = attr_sz;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = sample_period;
	attr.wakeup_events = sample_period;

	p.attr = &attr;
	p.sample_cb = sample_cb;
	p.lost_cb = lost_cb;
	p.ctx = ctx;

	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
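
/* Example (editorial sketch, not part of libbpf): minimal consumer set up
 * through perf_buffer__new(). map_fd must refer to a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map; map_fd, handle_event() and
 * handle_lost() are hypothetical user-side names.
 *
 *	static void handle_event(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		printf("cpu %d: %u-byte sample\n", cpu, size);
 *	}
 *
 *	static void handle_lost(void *ctx, int cpu, __u64 cnt)
 *	{
 *		fprintf(stderr, "cpu %d: lost %llu samples\n", cpu,
 *			(unsigned long long)cnt);
 *	}
 *
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(map_fd, 8, handle_event, handle_lost, NULL, NULL);
 *	if (!pb)
 *		return -errno;
 */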

struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
					 struct perf_event_attr *attr,
					 perf_buffer_event_fn event_cb, void *ctx,
					 const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	if (!attr)
		return libbpf_err_ptr(-EINVAL);

	if (!OPTS_VALID(opts, perf_buffer_raw_opts))
		return libbpf_err_ptr(-EINVAL);

	p.attr = attr;
	p.event_cb = event_cb;
	p.ctx = ctx;
	p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
	p.cpus = OPTS_GET(opts, cpus, NULL);
	p.map_keys = OPTS_GET(opts, map_keys, NULL);

	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
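
/* Example (editorial sketch): perf_buffer__new_raw() hands the caller full
 * control via a perf_buffer_event_fn callback and a caller-provided
 * perf_event_attr; process_event() and map_fd are hypothetical.
 *
 *	static enum bpf_perf_event_ret
 *	process_event(void *ctx, int cpu, struct perf_event_header *event)
 *	{
 *		if (event->type == PERF_RECORD_SAMPLE)
 *			printf("raw sample on cpu %d\n", cpu);
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_BPF_OUTPUT,
 *		.sample_type = PERF_SAMPLE_RAW,
 *		.sample_period = 1,
 *		.wakeup_events = 1,
 *	};
 *
 *	pb = perf_buffer__new_raw(map_fd, 8, &attr, process_event, NULL, NULL);
 */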

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	const char *online_cpus_file = "/sys/devices/system/cpu/online";
	struct bpf_map_info map;
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	bool *online = NULL;
	__u32 map_info_len;
	int err, i, j, n;

	if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
		pr_warn("page count should be a power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	/* best-effort sanity checks */
	memset(&map, 0, sizeof(map));
	map_info_len = sizeof(map);
	err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
		 * -EBADFD, -EFAULT, or -E2BIG on a real error
		 */
		if (err != -EINVAL) {
			pr_warn("failed to get map info for map FD %d: %s\n",
				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
			return ERR_PTR(err);
		}
		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
			 map_fd);
	} else {
		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
				map.name);
			return ERR_PTR(-EINVAL);
		}
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries && map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
	if (err) {
		pr_warn("failed to get online CPU mask: %d\n", err);
		goto error;
	}

	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		/* unless the user explicitly requested particular CPUs to
		 * attach to, skip offline/not-present CPUs
		 */
		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
			continue;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[j] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[j].events = EPOLLIN;
		pb->events[j].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[j]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
		j++;
	}
	pb->cpu_cnt = j;
	free(online);

	return pb;

error:
	free(online);
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}

/* layout of PERF_RECORD_SAMPLE records emitted with PERF_SAMPLE_RAW */
struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[];
};

/* layout of PERF_RECORD_LOST records */
struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
				     pb->page_size, &cpu_buf->buf,
				     &cpu_buf->buf_size,
				     perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__epoll_fd(const struct perf_buffer *pb)
{
	return pb->epoll_fd;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	if (cnt < 0)
		return -errno;

	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return libbpf_err(err);
		}
	}
	return cnt;
}
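
/* Example (editorial sketch): typical event loop driving the callbacks
 * registered with perf_buffer__new(); pb is a successfully created
 * perf_buffer and exiting is a hypothetical flag set from a signal handler.
 *
 *	while (!exiting) {
 *		int n = perf_buffer__poll(pb, 100);
 *
 *		if (n == -EINTR)
 *			continue;
 *		if (n < 0)
 *			break;
 *	}
 *	perf_buffer__free(pb);
 */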

/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
 * manager.
 */
size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
	return pb->cpu_cnt;
}

/*
 * Return perf_event FD of a ring buffer in *buf_idx* slot of
 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
 * select()/poll()/epoll() Linux syscalls.
 */
int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	return cpu_buf->fd;
}
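
/* Example (editorial sketch): feeding individual ring FDs into a
 * caller-owned poll() loop instead of perf_buffer__poll(); a readable FD's
 * slot is then drained with perf_buffer__consume_buffer() (below). Only the
 * perf_buffer__*() calls are real libbpf API here.
 *
 *	size_t i, n = perf_buffer__buffer_cnt(pb);
 *	struct pollfd fds[64];
 *
 *	for (i = 0; i < n && i < 64; i++) {
 *		fds[i].fd = perf_buffer__buffer_fd(pb, i);
 *		fds[i].events = POLLIN;
 *	}
 *	if (poll(fds, i, 100) > 0) {
 *		for (i = 0; i < n && i < 64; i++) {
 *			if (fds[i].revents & POLLIN)
 *				perf_buffer__consume_buffer(pb, i);
 *		}
 *	}
 */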

int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	*buf = cpu_buf->base;
	*buf_size = pb->mmap_size;
	return 0;
}

/*
 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
 * consume, do nothing and return success.
 * Returns:
 *   - 0 on success;
 *   - <0 on failure.
 */
int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	return perf_buffer__process_records(pb, cpu_buf);
}

int perf_buffer__consume(struct perf_buffer *pb)
{
	int i, err;

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

		if (!cpu_buf)
			continue;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
			return libbpf_err(err);
		}
	}
	return 0;
}
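
/* Example (editorial sketch): draining all buffers without blocking, e.g.
 * once before exiting so that already-queued samples are not dropped; pb is
 * a valid perf_buffer.
 *
 *	err = perf_buffer__consume(pb);
 *	if (err)
 *		fprintf(stderr, "failed to drain perf buffer: %d\n", err);
 */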

int bpf_program__set_attach_target(struct bpf_program *prog,
				   int attach_prog_fd,
				   const char *attach_func_name)
{
	int btf_obj_fd = 0, btf_id = 0, err;

	if (!prog || attach_prog_fd < 0)
		return libbpf_err(-EINVAL);

	if (prog->obj->loaded)
		return libbpf_err(-EINVAL);

	if (attach_prog_fd && !attach_func_name) {
		/* remember attach_prog_fd and let bpf_program__load() find
		 * BTF ID during the program load
		 */
		prog->attach_prog_fd = attach_prog_fd;
		return 0;
	}

	if (attach_prog_fd) {
		btf_id = libbpf_find_prog_btf_id(attach_func_name,
						 attach_prog_fd);
		if (btf_id < 0)
			return libbpf_err(btf_id);
	} else {
		if (!attach_func_name)
			return libbpf_err(-EINVAL);

		/* load btf_vmlinux, if not yet loaded */
		err = bpf_object__load_vmlinux_btf(prog->obj, true);
		if (err)
			return libbpf_err(err);
		err = find_kernel_btf_id(prog->obj, attach_func_name,
					 prog->expected_attach_type,
					 &btf_obj_fd, &btf_id);
		if (err)
			return libbpf_err(err);
	}

	prog->attach_btf_id = btf_id;
	prog->attach_btf_obj_fd = btf_obj_fd;
	prog->attach_prog_fd = attach_prog_fd;
	return 0;
}
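
/* Example (editorial sketch): retargeting a fentry program to a different
 * kernel function after open but before load; obj and "handle_fentry" are
 * hypothetical, and tcp_v6_connect() stands in for any attachable kernel
 * function.
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_fentry");
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v6_connect");
 *	if (err)
 *		goto cleanup;
 *	err = bpf_object__load(obj);
 */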

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* Each substring separated by ',' has format \d+-\d+ or \d+ */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		tmp = realloc(*mask, end + 1);
		if (!tmp) {
			err = -ENOMEM;
			goto cleanup;
		}
		*mask = tmp;
		memset(tmp + *mask_sz, 0, start - *mask_sz);
		memset(tmp + start, 1, end - start + 1);
		*mask_sz = end + 1;
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}
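
/* Worked example (editorial): parse_cpu_mask_str("0-2,4\n", &mask, &n)
 * yields n == 5 and mask == {true, true, true, false, true}: the mask is
 * sized to the highest CPU mentioned plus one, and CPUs absent from the
 * ranges are left false.
 */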

int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	int fd, err = 0, len;
	char buf[128];

	fd = open(fcpu, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		err = len ? -errno : -EINVAL;
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}
	buf[len] = '\0';

	return parse_cpu_mask_str(buf, mask, mask_sz);
}

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	int err, n, i, tmp_cpus;
	bool *mask;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return libbpf_err(err);

	tmp_cpus = 0;
	for (i = 0; i < n; i++) {
		if (mask[i])
			tmp_cpus++;
	}
	free(mask);

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}
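
/* Example (editorial sketch): sizing a buffer for reading a per-CPU map
 * value, the common use of libbpf_num_possible_cpus(); struct my_value is
 * hypothetical.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	values = calloc(ncpus, sizeof(struct my_value));
 */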

static int populate_skeleton_maps(const struct bpf_object *obj,
				  struct bpf_map_skeleton *maps,
				  size_t map_cnt)
{
	int i;

	for (i = 0; i < map_cnt; i++) {
		struct bpf_map **map = maps[i].map;
		const char *name = maps[i].name;
		void **mmaped = maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return -ESRCH;
		}

		/* externs shouldn't be pre-set from user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}
	return 0;
}

static int populate_skeleton_progs(const struct bpf_object *obj,
				   struct bpf_prog_skeleton *progs,
				   size_t prog_cnt)
{
	int i;

	for (i = 0; i < prog_cnt; i++) {
		struct bpf_program **prog = progs[i].prog;
		const char *name = progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return -ESRCH;
		}
	}
	return 0;
}

int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int err;

	/* Attempt to preserve opts->object_name, unless it is overridden by
	 * the user explicitly. Overriding the object name for skeletons is
	 * discouraged, because it breaks global data maps: their map names
	 * are prefixed with the object name, and bpftool assumes at
	 * skeleton-generation time that this name stays the same.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	err = libbpf_get_error(obj);
	if (err) {
		pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
			s->name, err);
		return libbpf_err(err);
	}

	*s->obj = obj;
	err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
	if (err) {
		pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
	if (err) {
		pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	return 0;
}

int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
{
	int err, len, var_idx, i;
	const char *var_name;
	const struct bpf_map *map;
	struct btf *btf;
	__u32 map_type_id;
	const struct btf_type *map_type, *var_type;
	const struct bpf_var_skeleton *var_skel;
	struct btf_var_secinfo *var;

	if (!s->obj)
		return libbpf_err(-EINVAL);

	btf = bpf_object__btf(s->obj);
	if (!btf) {
		pr_warn("subskeletons require BTF at runtime (object %s)\n",
			bpf_object__name(s->obj));
		return libbpf_err(-EINVAL);
	}

	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
	if (err) {
		pr_warn("failed to populate subskeleton maps: %d\n", err);
		return libbpf_err(err);
	}

	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
	if (err) {
		pr_warn("failed to populate subskeleton progs: %d\n", err);
		return libbpf_err(err);
	}

	for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
		var_skel = &s->vars[var_idx];
		map = *var_skel->map;
		map_type_id = bpf_map__btf_value_type_id(map);
		map_type = btf__type_by_id(btf, map_type_id);

		if (!btf_is_datasec(map_type)) {
			pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
				bpf_map__name(map),
				__btf_kind_str(btf_kind(map_type)));
			return libbpf_err(-EINVAL);
		}

		len = btf_vlen(map_type);
		var = btf_var_secinfos(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);
			if (strcmp(var_name, var_skel->name) == 0) {
				*var_skel->addr = map->mmaped + var->offset;
				break;
			}
		}
	}
	return 0;
}

void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
{
	if (!s)
		return;
	free(s->maps);
	free(s->progs);
	free(s->vars);
	free(s);
}

int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = map->fd;
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.type == BPF_MAP_TYPE_ARENA) {
			*mmaped = map->mmaped;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap the anonymous mmap()-ed "map initialization image"
		 * as BPF map-backed mmap()-ed memory, preserving the same
		 * memory address. This causes the kernel to change the
		 * process's page table to point at a different piece of
		 * kernel memory, but from the user-space point of view the
		 * memory address (and its contents, identical at this point)
		 * stays the same. This mapping is released by
		 * bpf_object__close() as part of the normal cleanup
		 * procedure, so skeleton cleanup doesn't need to worry
		 * about it.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return libbpf_err(err);
		}
	}

	return 0;
}

int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program *prog = *s->progs[i].prog;
		struct bpf_link **link = s->progs[i].link;

		if (!prog->autoload || !prog->autoattach)
			continue;

		/* auto-attaching not supported for this program */
		if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
			continue;

		/* if user already set the link manually, don't attempt auto-attach */
		if (*link)
			continue;

		err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
		if (err) {
			pr_warn("prog '%s': failed to auto-attach: %d\n",
				bpf_program__name(prog), err);
			return libbpf_err(err);
		}

		/* For some SEC() definitions auto-attach is supported only
		 * in some cases (e.g., when the definition completely
		 * specifies target information). SEC("uprobe") is one such
		 * case: if the user specified a target binary and function
		 * name, the BPF program can be auto-attached, but otherwise
		 * this shouldn't cause the skeleton's attach to fail and the
		 * program should just be skipped. prog_attach_fn signals
		 * such a case by returning 0 (no error) and setting the link
		 * to NULL.
		 */
	}

	return 0;
}
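
/* Example (editorial sketch): the generic skeleton APIs in this file are
 * normally driven through bpftool-generated wrappers: <name>__open() wraps
 * bpf_object__open_skeleton(), <name>__load() wraps
 * bpf_object__load_skeleton(), <name>__attach() wraps
 * bpf_object__attach_skeleton(), and <name>__destroy() wraps
 * bpf_object__destroy_skeleton(). For a hypothetical myprog.skel.h:
 *
 *	struct myprog *skel = myprog__open();
 *
 *	if (!skel)
 *		return -errno;
 *	if (myprog__load(skel) || myprog__attach(skel)) {
 *		myprog__destroy(skel);
 *		return 1;
 *	}
 */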

void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_link **link = s->progs[i].link;

		bpf_link__destroy(*link);
		*link = NULL;
	}
}

void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
	if (!s)
		return;

	if (s->progs)
		bpf_object__detach_skeleton(s);
	if (s->obj)
		bpf_object__close(*s->obj);
	free(s->maps);
	free(s->progs);
	free(s);
}