// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/kernel.h>
#include <linux/filter.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"
#include "str_error.h"

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

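/* Close @fd if it is a valid descriptor and report whether the probing
 * operation that produced it succeeded (1) or failed (0).
 */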
int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

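/* Detect whether BPF_PROG_LOAD accepts the prog_name attribute by loading
 * a trivial socket filter program with a name set (and, when token_fd is
 * provided, with prog_token_fd set as well); kernels that don't know these
 * fields reject the command.
 */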
static int probe_kern_prog_name(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.license = ptr_to_u64("GPL");
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
	attr.prog_token_fd = token_fd;
	if (token_fd)
		attr.prog_flags |= BPF_F_TOKEN_FD;
	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));

	/* make sure loading with name works */
	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
	return probe_fd(ret);
}

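/* Detect global data support: create a small ARRAY map and load a program
 * that stores directly into the map value through a BPF_LD_MAP_VALUE
 * pseudo-load, which is the access pattern global variables rely on.
 */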
static int probe_kern_global_data(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
			__func__, errstr(ret));
		return ret;
	}

	insns[0].imm = map;

	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	close(map);
	return probe_fd(ret);
}

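/* The probes below feed minimal hand-crafted raw BTF blobs to the kernel
 * via libbpf__load_raw_btf(); each one succeeds only if the kernel's BTF
 * validator understands the specific BTF kind or encoding being tested.
 */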
static int probe_kern_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_func(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */ /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */ /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_func_global(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* static void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */ /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_datasec(int token_fd)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */ /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */ /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_qmark_datasec(int token_fd)
{
	static const char strs[] = "\0x\0?.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */ /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC ?.data */ /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_float(int token_fd)
{
	static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_decl_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */ /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* attr */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_type_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* attr */
		BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
		/* ptr */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

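/* Detect support for mmap()'able ARRAY maps (the BPF_F_MMAPABLE flag). */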
static int probe_kern_array_mmap(int token_fd)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
		.token_fd = token_fd,
	);
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
	return probe_fd(fd);
}

static int probe_kern_exp_attach_type(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	/* use any valid combination of program type and (optional) non-zero
	 * expected attach type (i.e., not BPF_CGROUP_INET_INGRESS, which is
	 * zero) to see if the kernel supports the expected_attach_type field
	 * for the BPF_PROG_LOAD command
	 */
	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

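/* Detect the bpf_probe_read_kernel() helper by loading a tracepoint program
 * that calls it on an 8-byte stack slot; kernels without the helper reject
 * the program during verification.
 */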
static int probe_kern_probe_read_kernel(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
		BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
		BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

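/* Detect the BPF_PROG_BIND_MAP command by creating a dummy map and program
 * and attempting to bind them together.
 */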
static int probe_prog_bind_map(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
			__func__, errstr(ret));
		return ret;
	}

	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}

static int probe_module_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	char name[16];
	int fd, err;

	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (fd < 0)
		return 0; /* BTF not supported at all */

	memset(&info, 0, sizeof(info));
	info.name = ptr_to_u64(name);
	info.name_len = sizeof(name);

	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
	 * kernel's module BTF support coincides with support for
	 * name/name_len fields in struct bpf_btf_info.
	 */
	err = bpf_btf_get_info_by_fd(fd, &info, &len);
	close(fd);
	return !err;
}

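/* Detect BPF_LINK_CREATE support for perf events: with link support the
 * kernel gets far enough to reject the bogus perf_event FD with -EBADF,
 * while kernels without it bail out earlier with -EINVAL.
 */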
static int probe_perf_link(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, link_fd, err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &opts);
	if (prog_fd < 0)
		return -errno;

	/* use invalid perf_event FD to get EBADF, if link is supported;
	 * otherwise EINVAL should be returned
	 */
	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

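/* Detect multi-uprobe link support (BPF_TRACE_UPROBE_MULTI), additionally
 * requiring working per-process PID filtering (see the second probe attempt
 * below).
 */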
static int probe_uprobe_multi_link(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int prog_fd, link_fd, err;
	unsigned long offset = 0;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &load_opts);
	if (prog_fd < 0)
		return -errno;

	/* Creating uprobe in '/' binary should fail with -EBADF. */
	link_opts.uprobe_multi.path = "/";
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0 || err != -EBADF) {
		if (link_fd >= 0)
			close(link_fd);
		close(prog_fd);
		return 0;
	}

	/* Initial multi-uprobe support in the kernel didn't handle PID filtering
	 * correctly (it was doing thread filtering, not process filtering).
	 * So now we'll detect whether the PID filtering logic was fixed, and,
	 * if not, we'll pretend multi-uprobes are not supported at all.
	 * Multi-uprobes are used in USDT attachment logic, and we need to be
	 * conservative here, because multi-uprobe selection happens early at
	 * load time, while the use of PID filtering is only known late at
	 * attachment time, at which point it's too late to undo multi-uprobe
	 * selection.
	 *
	 * Creating a uprobe with pid == -1 for the (invalid) '/' binary will fail
	 * early with -EINVAL on kernels with fixed PID filtering logic;
	 * otherwise -ESRCH would be returned if we passed a correct binary path
	 * (but we'll just get -EBADF, of course).
	 */
	link_opts.uprobe_multi.pid = -1; /* invalid PID */
	link_opts.uprobe_multi.path = "/"; /* invalid path */
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EINVAL;
}

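/* Detect BPF cookie support by loading a program that calls
 * bpf_get_attach_cookie(); calls to unknown helpers fail verification.
 */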
static int probe_kern_bpf_cookie(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, insn_cnt = ARRAY_SIZE(insns);

	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(ret);
}

static int probe_kern_btf_enum64(int token_fd)
{
	static const char strs[] = "\0enum64";
	__u32 types[] = {
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

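/* Detect kernel-side recognition of the "arg:ctx" decl tag (__arg_ctx):
 * a global subprog whose void * argument is tagged arg:ctx calls
 * bpf_get_func_ip(), which the verifier only allows on a context argument,
 * so the load is expected to succeed only if the tag is honored.
 */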
static int probe_kern_arg_ctx_tag(int token_fd)
{
	static const char strs[] = "\0a\0b\0arg:ctx\0";
	const __u32 types[] = {
		/* [1] INT */
		BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
		/* [2] PTR -> VOID */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
		/* [3] FUNC_PROTO `int(void *a)` */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
		BTF_PARAM_ENC(1 /* "a" */, 2),
		/* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
		BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
		/* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
		BTF_PARAM_ENC(3 /* "b" */, 2),
		/* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
		BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
		/* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
		BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
	};
	const struct bpf_insn insns[] = {
		/* main prog */
		BPF_CALL_REL(+1),
		BPF_EXIT_INSN(),
		/* global subprog */
		BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
		BPF_EXIT_INSN(),
	};
	const struct bpf_func_info_min func_infos[] = {
		{ 0, 4 }, /* main prog -> FUNC 'a' */
		{ 2, 6 }, /* subprog -> FUNC 'b' */
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (btf_fd < 0)
		return 0;

	opts.prog_btf_fd = btf_fd;
	opts.func_info = &func_infos;
	opts.func_info_cnt = ARRAY_SIZE(func_infos);
	opts.func_info_rec_size = sizeof(func_infos[0]);

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
				"GPL", insns, insn_cnt, &opts);
	close(btf_fd);

	return probe_fd(prog_fd);
}

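/* Table of feature probes, indexed by enum kern_feature_id. Probe results
 * are memoized in a struct kern_feature_cache: the static feature_cache by
 * default, or a caller-provided cache passed to feat_supported().
 */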
typedef int (*feature_probe_fn)(int /* token_fd */);

static struct kern_feature_cache feature_cache;

static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
	[FEAT_PERF_LINK] = {
		"BPF perf link support", probe_perf_link,
	},
	[FEAT_BTF_DECL_TAG] = {
		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
	},
	[FEAT_BTF_TYPE_TAG] = {
		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
	},
	[FEAT_MEMCG_ACCOUNT] = {
		"memcg-based memory accounting", probe_memcg_account,
	},
	[FEAT_BPF_COOKIE] = {
		"BPF cookie support", probe_kern_bpf_cookie,
	},
	[FEAT_BTF_ENUM64] = {
		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
	},
	[FEAT_SYSCALL_WRAPPER] = {
		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
	},
	[FEAT_UPROBE_MULTI_LINK] = {
		"BPF multi-uprobe link support", probe_uprobe_multi_link,
	},
	[FEAT_ARG_CTX_TAG] = {
		"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
	},
	[FEAT_BTF_QMARK_DATASEC] = {
		"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
	},
};

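/* Check whether a given kernel feature is supported, running its probe
 * lazily on first use and caching the result. Probe errors are reported
 * with pr_warn() and conservatively treated as "missing".
 */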
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	/* assume global feature cache, unless custom one is provided */
	if (!cache)
		cache = &feature_cache;

	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
		ret = feat->probe(cache->token_fd);
		if (ret > 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %s\n",
				feat->desc, errstr(ret));
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		}
	}

	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
}