v6.9.4 (tools/lib/bpf/features.c)
  1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
  2/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
  3#include <linux/kernel.h>
  4#include <linux/filter.h>
  5#include "bpf.h"
  6#include "libbpf.h"
  7#include "libbpf_common.h"
  8#include "libbpf_internal.h"
  9#include "str_error.h"
 10
 11static inline __u64 ptr_to_u64(const void *ptr)
 12{
 13	return (__u64)(unsigned long)ptr;
 14}
 15
 16int probe_fd(int fd)
 17{
 18	if (fd >= 0)
 19		close(fd);
 20	return fd >= 0;
 21}
 22
 23static int probe_kern_prog_name(int token_fd)
 24{
 25	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
 26	struct bpf_insn insns[] = {
 27		BPF_MOV64_IMM(BPF_REG_0, 0),
 28		BPF_EXIT_INSN(),
 29	};
 30	union bpf_attr attr;
 31	int ret;
 32
 33	memset(&attr, 0, attr_sz);
 34	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 35	attr.license = ptr_to_u64("GPL");
 36	attr.insns = ptr_to_u64(insns);
 37	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
 38	attr.prog_token_fd = token_fd;
 39	if (token_fd)
 40		attr.prog_flags |= BPF_F_TOKEN_FD;
 41	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
 42
 43	/* make sure loading with name works */
 44	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
 45	return probe_fd(ret);
 46}
 47
 48static int probe_kern_global_data(int token_fd)
 49{
 50	char *cp, errmsg[STRERR_BUFSIZE];
 51	struct bpf_insn insns[] = {
 52		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
 53		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
 54		BPF_MOV64_IMM(BPF_REG_0, 0),
 55		BPF_EXIT_INSN(),
 56	};
 57	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
 58		.token_fd = token_fd,
 59		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
 60	);
 61	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
 62		.token_fd = token_fd,
 63		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
 64	);
 65	int ret, map, insn_cnt = ARRAY_SIZE(insns);
 66
 67	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
 68	if (map < 0) {
 69		ret = -errno;
 70		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
 71		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
 72			__func__, cp, -ret);
 73		return ret;
 74	}
 75
 76	insns[0].imm = map;
 77
 78	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
 79	close(map);
 80	return probe_fd(ret);
 81}
 82
 83static int probe_kern_btf(int token_fd)
 84{
 85	static const char strs[] = "\0int";
 86	__u32 types[] = {
 87		/* int */
 88		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
 89	};
 90
 91	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
 92					     strs, sizeof(strs), token_fd));
 93}
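
/* For illustration, a hedged sketch of the same minimal-BTF check expressed
 * through libbpf's public BTF API (requires <bpf/btf.h>). Unlike the probe
 * above it does not route the load through a BPF token, so it is a
 * simplification, not a drop-in replacement:
 */
static bool can_load_minimal_btf(void)
{
	struct btf *btf = btf__new_empty();
	bool ok;

	if (!btf)
		return false;

	/* mirrors BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4) above */
	if (btf__add_int(btf, "int", 4, BTF_INT_SIGNED) < 0) {
		btf__free(btf);
		return false;
	}

	ok = btf__load_into_kernel(btf) == 0;
	btf__free(btf);
	return ok;
}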
 94
 95static int probe_kern_btf_func(int token_fd)
 96{
 97	static const char strs[] = "\0int\0x\0a";
 98	/* void x(int a) {} */
 99	__u32 types[] = {
100		/* int */
101		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
102		/* FUNC_PROTO */                                /* [2] */
103		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
104		BTF_PARAM_ENC(7, 1),
105		/* FUNC x */                                    /* [3] */
106		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
107	};
108
109	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
110					     strs, sizeof(strs), token_fd));
111}
112
113static int probe_kern_btf_func_global(int token_fd)
114{
115	static const char strs[] = "\0int\0x\0a";
116	/* static void x(int a) {} */
117	__u32 types[] = {
118		/* int */
119		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
120		/* FUNC_PROTO */                                /* [2] */
121		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
122		BTF_PARAM_ENC(7, 1),
123		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
124		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
125	};
126
127	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
128					     strs, sizeof(strs), token_fd));
129}
130
131static int probe_kern_btf_datasec(int token_fd)
132{
133	static const char strs[] = "\0x\0.data";
134	/* static int a; */
135	__u32 types[] = {
136		/* int */
137		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
138		/* VAR x */                                     /* [2] */
139		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
140		BTF_VAR_STATIC,
141		/* DATASEC val */                               /* [3] */
142		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
143		BTF_VAR_SECINFO_ENC(2, 0, 4),
144	};
145
146	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
147					     strs, sizeof(strs), token_fd));
148}
149
150static int probe_kern_btf_qmark_datasec(int token_fd)
151{
152	static const char strs[] = "\0x\0?.data";
153	/* static int a; */
154	__u32 types[] = {
155		/* int */
156		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
157		/* VAR x */                                     /* [2] */
158		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
159		BTF_VAR_STATIC,
160		/* DATASEC ?.data */                            /* [3] */
161		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
162		BTF_VAR_SECINFO_ENC(2, 0, 4),
163	};
164
165	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
166					     strs, sizeof(strs), token_fd));
167}
168
169static int probe_kern_btf_float(int token_fd)
170{
171	static const char strs[] = "\0float";
172	__u32 types[] = {
173		/* float */
174		BTF_TYPE_FLOAT_ENC(1, 4),
175	};
176
177	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
178					     strs, sizeof(strs), token_fd));
179}
180
181static int probe_kern_btf_decl_tag(int token_fd)
182{
183	static const char strs[] = "\0tag";
184	__u32 types[] = {
185		/* int */
186		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
187		/* VAR x */                                     /* [2] */
188		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
189		BTF_VAR_STATIC,
190		/* attr */
191		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
192	};
193
194	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
195					     strs, sizeof(strs), token_fd));
196}
197
198static int probe_kern_btf_type_tag(int token_fd)
199{
200	static const char strs[] = "\0tag";
201	__u32 types[] = {
202		/* int */
203		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),		/* [1] */
204		/* attr */
205		BTF_TYPE_TYPE_TAG_ENC(1, 1),				/* [2] */
206		/* ptr */
207		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),	/* [3] */
208	};
209
210	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
211					     strs, sizeof(strs), token_fd));
212}
213
214static int probe_kern_array_mmap(int token_fd)
215{
216	LIBBPF_OPTS(bpf_map_create_opts, opts,
217		.map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
218		.token_fd = token_fd,
219	);
220	int fd;
221
222	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
223	return probe_fd(fd);
224}
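
/* Hedged illustration of what the BPF_F_MMAPABLE flag probed above enables:
 * user space can map an ARRAY map's value area directly and skip bpf()
 * lookup/update syscalls (the kernel rounds the mapping up to page size;
 * requires <sys/mman.h>):
 */
static void *mmap_array_map(int map_fd, size_t value_sz, __u32 max_entries)
{
	void *mem;

	mem = mmap(NULL, value_sz * max_entries, PROT_READ | PROT_WRITE,
		   MAP_SHARED, map_fd, 0);
	return mem == MAP_FAILED ? NULL : mem;
}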
225
226static int probe_kern_exp_attach_type(int token_fd)
227{
228	LIBBPF_OPTS(bpf_prog_load_opts, opts,
229		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
230		.token_fd = token_fd,
231		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
232	);
233	struct bpf_insn insns[] = {
234		BPF_MOV64_IMM(BPF_REG_0, 0),
235		BPF_EXIT_INSN(),
236	};
237	int fd, insn_cnt = ARRAY_SIZE(insns);
238
239	/* use any valid combination of program type and (optional)
 240	 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS)
241	 * to see if kernel supports expected_attach_type field for
242	 * BPF_PROG_LOAD command
243	 */
244	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
245	return probe_fd(fd);
246}
247
248static int probe_kern_probe_read_kernel(int token_fd)
249{
250	LIBBPF_OPTS(bpf_prog_load_opts, opts,
251		.token_fd = token_fd,
252		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
253	);
254	struct bpf_insn insns[] = {
255		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
256		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
257		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
258		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
259		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
260		BPF_EXIT_INSN(),
261	};
262	int fd, insn_cnt = ARRAY_SIZE(insns);
263
264	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
265	return probe_fd(fd);
266}
267
268static int probe_prog_bind_map(int token_fd)
269{
270	char *cp, errmsg[STRERR_BUFSIZE];
271	struct bpf_insn insns[] = {
272		BPF_MOV64_IMM(BPF_REG_0, 0),
273		BPF_EXIT_INSN(),
274	};
275	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
276		.token_fd = token_fd,
277		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
278	);
279	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
280		.token_fd = token_fd,
281		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
282	);
283	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
284
285	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
286	if (map < 0) {
287		ret = -errno;
288		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
289		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
290			__func__, cp, -ret);
291		return ret;
292	}
293
294	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
295	if (prog < 0) {
296		close(map);
297		return 0;
298	}
299
300	ret = bpf_prog_bind_map(prog, map, NULL);
301
302	close(map);
303	close(prog);
304
305	return ret >= 0;
306}
307
308static int probe_module_btf(int token_fd)
309{
310	static const char strs[] = "\0int";
311	__u32 types[] = {
312		/* int */
313		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
314	};
315	struct bpf_btf_info info;
316	__u32 len = sizeof(info);
317	char name[16];
318	int fd, err;
319
320	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
321	if (fd < 0)
322		return 0; /* BTF not supported at all */
323
324	memset(&info, 0, sizeof(info));
325	info.name = ptr_to_u64(name);
326	info.name_len = sizeof(name);
327
328	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
329	 * kernel's module BTF support coincides with support for
330	 * name/name_len fields in struct bpf_btf_info.
331	 */
332	err = bpf_btf_get_info_by_fd(fd, &info, &len);
333	close(fd);
334	return !err;
335}
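
/* When module BTF is supported, in-kernel BTF objects (vmlinux plus one per
 * module built with BTF) can be enumerated through the public API; a hedged
 * sketch with error handling trimmed:
 */
static int count_kernel_btfs(void)
{
	struct bpf_btf_info info;
	char name[64];
	__u32 id = 0, len;
	int fd, cnt = 0;

	while (!bpf_btf_get_next_id(id, &id)) {
		fd = bpf_btf_get_fd_by_id(id);
		if (fd < 0)
			continue;

		memset(&info, 0, sizeof(info));
		info.name = ptr_to_u64(name);
		info.name_len = sizeof(name);
		len = sizeof(info);

		if (!bpf_btf_get_info_by_fd(fd, &info, &len) && info.kernel_btf)
			cnt++;
		close(fd);
	}
	return cnt;
}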
336
337static int probe_perf_link(int token_fd)
338{
339	struct bpf_insn insns[] = {
340		BPF_MOV64_IMM(BPF_REG_0, 0),
341		BPF_EXIT_INSN(),
342	};
343	LIBBPF_OPTS(bpf_prog_load_opts, opts,
344		.token_fd = token_fd,
345		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
346	);
347	int prog_fd, link_fd, err;
348
349	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
350				insns, ARRAY_SIZE(insns), &opts);
351	if (prog_fd < 0)
352		return -errno;
353
354	/* use invalid perf_event FD to get EBADF, if link is supported;
355	 * otherwise EINVAL should be returned
356	 */
357	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
358	err = -errno; /* close() can clobber errno */
359
360	if (link_fd >= 0)
361		close(link_fd);
362	close(prog_fd);
363
364	return link_fd < 0 && err == -EBADF;
365}
366
367static int probe_uprobe_multi_link(int token_fd)
368{
369	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
370		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
371		.token_fd = token_fd,
372		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
373	);
374	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
375	struct bpf_insn insns[] = {
376		BPF_MOV64_IMM(BPF_REG_0, 0),
377		BPF_EXIT_INSN(),
378	};
379	int prog_fd, link_fd, err;
380	unsigned long offset = 0;
381
382	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
383				insns, ARRAY_SIZE(insns), &load_opts);
384	if (prog_fd < 0)
385		return -errno;
386
387	/* Creating uprobe in '/' binary should fail with -EBADF. */
388	link_opts.uprobe_multi.path = "/";
389	link_opts.uprobe_multi.offsets = &offset;
390	link_opts.uprobe_multi.cnt = 1;
391
392	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
393	err = -errno; /* close() can clobber errno */
394
395	if (link_fd >= 0)
396		close(link_fd);
397	close(prog_fd);
398
399	return link_fd < 0 && err == -EBADF;
400}
401
402static int probe_kern_bpf_cookie(int token_fd)
403{
404	struct bpf_insn insns[] = {
405		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
406		BPF_EXIT_INSN(),
407	};
408	LIBBPF_OPTS(bpf_prog_load_opts, opts,
409		.token_fd = token_fd,
410		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
411	);
412	int ret, insn_cnt = ARRAY_SIZE(insns);
413
414	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
415	return probe_fd(ret);
416}
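
/* Hedged sketch of the user-facing side of BPF cookies: an attach-time cookie
 * is passed via attach opts (the kprobe target and value below are only
 * examples) and read back inside the BPF program with bpf_get_attach_cookie():
 */
static struct bpf_link *attach_with_cookie(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.bpf_cookie = 0x1234,	/* example value */
	);

	return bpf_program__attach_kprobe_opts(prog, "do_nanosleep", &opts);
}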
417
418static int probe_kern_btf_enum64(int token_fd)
419{
420	static const char strs[] = "\0enum64";
421	__u32 types[] = {
422		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
423	};
424
425	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
426					     strs, sizeof(strs), token_fd));
427}
428
429static int probe_kern_arg_ctx_tag(int token_fd)
430{
431	static const char strs[] = "\0a\0b\0arg:ctx\0";
432	const __u32 types[] = {
433		/* [1] INT */
434		BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
435		/* [2] PTR -> VOID */
436		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
437		/* [3] FUNC_PROTO `int(void *a)` */
438		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
439		BTF_PARAM_ENC(1 /* "a" */, 2),
440		/* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
441		BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
442		/* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
443		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
444		BTF_PARAM_ENC(3 /* "b" */, 2),
445		/* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
446		BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
447		/* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
448		BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
449	};
450	const struct bpf_insn insns[] = {
451		/* main prog */
452		BPF_CALL_REL(+1),
453		BPF_EXIT_INSN(),
454		/* global subprog */
455		BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
456		BPF_EXIT_INSN(),
457	};
458	const struct bpf_func_info_min func_infos[] = {
459		{ 0, 4 }, /* main prog -> FUNC 'a' */
460		{ 2, 6 }, /* subprog -> FUNC 'b' */
461	};
462	LIBBPF_OPTS(bpf_prog_load_opts, opts,
463		.token_fd = token_fd,
464		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
465	);
466	int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
467
468	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
469	if (btf_fd < 0)
470		return 0;
471
472	opts.prog_btf_fd = btf_fd;
473	opts.func_info = &func_infos;
474	opts.func_info_cnt = ARRAY_SIZE(func_infos);
475	opts.func_info_rec_size = sizeof(func_infos[0]);
476
477	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
478				"GPL", insns, insn_cnt, &opts);
479	close(btf_fd);
480
481	return probe_fd(prog_fd);
482}
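
/* What probe_kern_arg_ctx_tag() detects, seen from the BPF program side:
 * bpf_helpers.h defines __arg_ctx as __attribute__((btf_decl_tag("arg:ctx"))),
 * so a global subprog can mark a void * argument as the program context and
 * still call ctx-requiring helpers. A hedged BPF-side sketch (built separately
 * with clang -target bpf):
 *
 *	__noinline int subprog(void *ctx __arg_ctx)
 *	{
 *		return bpf_get_func_ip(ctx) != 0;
 *	}
 *
 *	SEC("kprobe/do_nanosleep")
 *	int prog(struct pt_regs *ctx)
 *	{
 *		return subprog(ctx);
 *	}
 */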
483
484typedef int (*feature_probe_fn)(int /* token_fd */);
485
486static struct kern_feature_cache feature_cache;
487
488static struct kern_feature_desc {
489	const char *desc;
490	feature_probe_fn probe;
491} feature_probes[__FEAT_CNT] = {
492	[FEAT_PROG_NAME] = {
493		"BPF program name", probe_kern_prog_name,
494	},
495	[FEAT_GLOBAL_DATA] = {
496		"global variables", probe_kern_global_data,
497	},
498	[FEAT_BTF] = {
499		"minimal BTF", probe_kern_btf,
500	},
501	[FEAT_BTF_FUNC] = {
502		"BTF functions", probe_kern_btf_func,
503	},
504	[FEAT_BTF_GLOBAL_FUNC] = {
505		"BTF global function", probe_kern_btf_func_global,
506	},
507	[FEAT_BTF_DATASEC] = {
508		"BTF data section and variable", probe_kern_btf_datasec,
509	},
510	[FEAT_ARRAY_MMAP] = {
511		"ARRAY map mmap()", probe_kern_array_mmap,
512	},
513	[FEAT_EXP_ATTACH_TYPE] = {
514		"BPF_PROG_LOAD expected_attach_type attribute",
515		probe_kern_exp_attach_type,
516	},
517	[FEAT_PROBE_READ_KERN] = {
518		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
519	},
520	[FEAT_PROG_BIND_MAP] = {
521		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
522	},
523	[FEAT_MODULE_BTF] = {
524		"module BTF support", probe_module_btf,
525	},
526	[FEAT_BTF_FLOAT] = {
527		"BTF_KIND_FLOAT support", probe_kern_btf_float,
528	},
529	[FEAT_PERF_LINK] = {
530		"BPF perf link support", probe_perf_link,
531	},
532	[FEAT_BTF_DECL_TAG] = {
533		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
534	},
535	[FEAT_BTF_TYPE_TAG] = {
536		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
537	},
538	[FEAT_MEMCG_ACCOUNT] = {
539		"memcg-based memory accounting", probe_memcg_account,
540	},
541	[FEAT_BPF_COOKIE] = {
542		"BPF cookie support", probe_kern_bpf_cookie,
543	},
544	[FEAT_BTF_ENUM64] = {
545		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
546	},
547	[FEAT_SYSCALL_WRAPPER] = {
548		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
549	},
550	[FEAT_UPROBE_MULTI_LINK] = {
551		"BPF multi-uprobe link support", probe_uprobe_multi_link,
552	},
553	[FEAT_ARG_CTX_TAG] = {
554		"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
555	},
556	[FEAT_BTF_QMARK_DATASEC] = {
557		"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
558	},
559};
560
561bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
562{
563	struct kern_feature_desc *feat = &feature_probes[feat_id];
564	int ret;
565
566	/* assume global feature cache, unless custom one is provided */
567	if (!cache)
568		cache = &feature_cache;
569
570	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
571		ret = feat->probe(cache->token_fd);
572		if (ret > 0) {
573			WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
574		} else if (ret == 0) {
575			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
576		} else {
577			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
578			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
579		}
580	}
581
582	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
583}
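
Callers elsewhere in libbpf consume these cached results through thin wrappers
around feat_supported(). A hedged sketch of such a caller follows
(obj_kernel_supports() is an illustrative name, not the actual libbpf.c
symbol): it prefers a per-object cache carrying a BPF token and otherwise falls
back to the global, token-less cache.

static bool obj_kernel_supports(struct kern_feature_cache *obj_cache,
				enum kern_feature_id feat_id)
{
	/* per-object cache (e.g. one set up with a BPF token), if any */
	if (obj_cache)
		return feat_supported(obj_cache, feat_id);

	/* otherwise use the global, token-less feature_cache above */
	return feat_supported(NULL, feat_id);
}

Results such as FEAT_BTF or FEAT_BTF_FUNC then gate behavior like sanitizing
BTF before load on older kernels.
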
v6.13.7 (tools/lib/bpf/features.c)
  1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
  2/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
  3#include <linux/kernel.h>
  4#include <linux/filter.h>
  5#include "bpf.h"
  6#include "libbpf.h"
  7#include "libbpf_common.h"
  8#include "libbpf_internal.h"
  9#include "str_error.h"
 10
 11static inline __u64 ptr_to_u64(const void *ptr)
 12{
 13	return (__u64)(unsigned long)ptr;
 14}
 15
 16int probe_fd(int fd)
 17{
 18	if (fd >= 0)
 19		close(fd);
 20	return fd >= 0;
 21}
 22
 23static int probe_kern_prog_name(int token_fd)
 24{
 25	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
 26	struct bpf_insn insns[] = {
 27		BPF_MOV64_IMM(BPF_REG_0, 0),
 28		BPF_EXIT_INSN(),
 29	};
 30	union bpf_attr attr;
 31	int ret;
 32
 33	memset(&attr, 0, attr_sz);
 34	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 35	attr.license = ptr_to_u64("GPL");
 36	attr.insns = ptr_to_u64(insns);
 37	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
 38	attr.prog_token_fd = token_fd;
 39	if (token_fd)
 40		attr.prog_flags |= BPF_F_TOKEN_FD;
 41	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
 42
 43	/* make sure loading with name works */
 44	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
 45	return probe_fd(ret);
 46}
 47
 48static int probe_kern_global_data(int token_fd)
 49{
 50	struct bpf_insn insns[] = {
 51		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
 52		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
 53		BPF_MOV64_IMM(BPF_REG_0, 0),
 54		BPF_EXIT_INSN(),
 55	};
 56	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
 57		.token_fd = token_fd,
 58		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
 59	);
 60	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
 61		.token_fd = token_fd,
 62		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
 63	);
 64	int ret, map, insn_cnt = ARRAY_SIZE(insns);
 65
 66	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
 67	if (map < 0) {
 68		ret = -errno;
 69		pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
 70			__func__, errstr(ret));
 71		return ret;
 72	}
 73
 74	insns[0].imm = map;
 75
 76	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
 77	close(map);
 78	return probe_fd(ret);
 79}
 80
 81static int probe_kern_btf(int token_fd)
 82{
 83	static const char strs[] = "\0int";
 84	__u32 types[] = {
 85		/* int */
 86		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
 87	};
 88
 89	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
 90					     strs, sizeof(strs), token_fd));
 91}
 92
 93static int probe_kern_btf_func(int token_fd)
 94{
 95	static const char strs[] = "\0int\0x\0a";
 96	/* void x(int a) {} */
 97	__u32 types[] = {
 98		/* int */
 99		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
100		/* FUNC_PROTO */                                /* [2] */
101		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
102		BTF_PARAM_ENC(7, 1),
103		/* FUNC x */                                    /* [3] */
104		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
105	};
106
107	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
108					     strs, sizeof(strs), token_fd));
109}
110
111static int probe_kern_btf_func_global(int token_fd)
112{
113	static const char strs[] = "\0int\0x\0a";
114	/* static void x(int a) {} */
115	__u32 types[] = {
116		/* int */
117		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
118		/* FUNC_PROTO */                                /* [2] */
119		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
120		BTF_PARAM_ENC(7, 1),
121		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
122		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
123	};
124
125	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
126					     strs, sizeof(strs), token_fd));
127}
128
129static int probe_kern_btf_datasec(int token_fd)
130{
131	static const char strs[] = "\0x\0.data";
132	/* static int a; */
133	__u32 types[] = {
134		/* int */
135		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
136		/* VAR x */                                     /* [2] */
137		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
138		BTF_VAR_STATIC,
139		/* DATASEC val */                               /* [3] */
140		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
141		BTF_VAR_SECINFO_ENC(2, 0, 4),
142	};
143
144	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
145					     strs, sizeof(strs), token_fd));
146}
147
148static int probe_kern_btf_qmark_datasec(int token_fd)
149{
150	static const char strs[] = "\0x\0?.data";
151	/* static int a; */
152	__u32 types[] = {
153		/* int */
154		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
155		/* VAR x */                                     /* [2] */
156		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
157		BTF_VAR_STATIC,
158		/* DATASEC ?.data */                            /* [3] */
159		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
160		BTF_VAR_SECINFO_ENC(2, 0, 4),
161	};
162
163	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
164					     strs, sizeof(strs), token_fd));
165}
166
167static int probe_kern_btf_float(int token_fd)
168{
169	static const char strs[] = "\0float";
170	__u32 types[] = {
171		/* float */
172		BTF_TYPE_FLOAT_ENC(1, 4),
173	};
174
175	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
176					     strs, sizeof(strs), token_fd));
177}
178
179static int probe_kern_btf_decl_tag(int token_fd)
180{
181	static const char strs[] = "\0tag";
182	__u32 types[] = {
183		/* int */
184		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
185		/* VAR x */                                     /* [2] */
186		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
187		BTF_VAR_STATIC,
188		/* attr */
189		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
190	};
191
192	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
193					     strs, sizeof(strs), token_fd));
194}
195
196static int probe_kern_btf_type_tag(int token_fd)
197{
198	static const char strs[] = "\0tag";
199	__u32 types[] = {
200		/* int */
201		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),		/* [1] */
202		/* attr */
203		BTF_TYPE_TYPE_TAG_ENC(1, 1),				/* [2] */
204		/* ptr */
205		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),	/* [3] */
206	};
207
208	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
209					     strs, sizeof(strs), token_fd));
210}
211
212static int probe_kern_array_mmap(int token_fd)
213{
214	LIBBPF_OPTS(bpf_map_create_opts, opts,
215		.map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
216		.token_fd = token_fd,
217	);
218	int fd;
219
220	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
221	return probe_fd(fd);
222}
223
224static int probe_kern_exp_attach_type(int token_fd)
225{
226	LIBBPF_OPTS(bpf_prog_load_opts, opts,
227		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
228		.token_fd = token_fd,
229		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
230	);
231	struct bpf_insn insns[] = {
232		BPF_MOV64_IMM(BPF_REG_0, 0),
233		BPF_EXIT_INSN(),
234	};
235	int fd, insn_cnt = ARRAY_SIZE(insns);
236
237	/* use any valid combination of program type and (optional)
 238	 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS)
239	 * to see if kernel supports expected_attach_type field for
240	 * BPF_PROG_LOAD command
241	 */
242	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
243	return probe_fd(fd);
244}
245
246static int probe_kern_probe_read_kernel(int token_fd)
247{
248	LIBBPF_OPTS(bpf_prog_load_opts, opts,
249		.token_fd = token_fd,
250		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
251	);
252	struct bpf_insn insns[] = {
253		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
254		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
255		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
256		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
257		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
258		BPF_EXIT_INSN(),
259	};
260	int fd, insn_cnt = ARRAY_SIZE(insns);
261
262	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
263	return probe_fd(fd);
264}
265
266static int probe_prog_bind_map(int token_fd)
267{
268	struct bpf_insn insns[] = {
269		BPF_MOV64_IMM(BPF_REG_0, 0),
270		BPF_EXIT_INSN(),
271	};
272	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
273		.token_fd = token_fd,
274		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
275	);
276	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
277		.token_fd = token_fd,
278		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
279	);
280	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
281
282	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
283	if (map < 0) {
284		ret = -errno;
285		pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
286			__func__, errstr(ret));
287		return ret;
288	}
289
290	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
291	if (prog < 0) {
292		close(map);
293		return 0;
294	}
295
296	ret = bpf_prog_bind_map(prog, map, NULL);
297
298	close(map);
299	close(prog);
300
301	return ret >= 0;
302}
303
304static int probe_module_btf(int token_fd)
305{
306	static const char strs[] = "\0int";
307	__u32 types[] = {
308		/* int */
309		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
310	};
311	struct bpf_btf_info info;
312	__u32 len = sizeof(info);
313	char name[16];
314	int fd, err;
315
316	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
317	if (fd < 0)
318		return 0; /* BTF not supported at all */
319
320	memset(&info, 0, sizeof(info));
321	info.name = ptr_to_u64(name);
322	info.name_len = sizeof(name);
323
324	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
325	 * kernel's module BTF support coincides with support for
326	 * name/name_len fields in struct bpf_btf_info.
327	 */
328	err = bpf_btf_get_info_by_fd(fd, &info, &len);
329	close(fd);
330	return !err;
331}
332
333static int probe_perf_link(int token_fd)
334{
335	struct bpf_insn insns[] = {
336		BPF_MOV64_IMM(BPF_REG_0, 0),
337		BPF_EXIT_INSN(),
338	};
339	LIBBPF_OPTS(bpf_prog_load_opts, opts,
340		.token_fd = token_fd,
341		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
342	);
343	int prog_fd, link_fd, err;
344
345	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
346				insns, ARRAY_SIZE(insns), &opts);
347	if (prog_fd < 0)
348		return -errno;
349
350	/* use invalid perf_event FD to get EBADF, if link is supported;
351	 * otherwise EINVAL should be returned
352	 */
353	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
354	err = -errno; /* close() can clobber errno */
355
356	if (link_fd >= 0)
357		close(link_fd);
358	close(prog_fd);
359
360	return link_fd < 0 && err == -EBADF;
361}
362
363static int probe_uprobe_multi_link(int token_fd)
364{
365	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
366		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
367		.token_fd = token_fd,
368		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
369	);
370	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
371	struct bpf_insn insns[] = {
372		BPF_MOV64_IMM(BPF_REG_0, 0),
373		BPF_EXIT_INSN(),
374	};
375	int prog_fd, link_fd, err;
376	unsigned long offset = 0;
377
378	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
379				insns, ARRAY_SIZE(insns), &load_opts);
380	if (prog_fd < 0)
381		return -errno;
382
383	/* Creating uprobe in '/' binary should fail with -EBADF. */
384	link_opts.uprobe_multi.path = "/";
385	link_opts.uprobe_multi.offsets = &offset;
386	link_opts.uprobe_multi.cnt = 1;
387
388	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
389	err = -errno; /* close() can clobber errno */
390
391	if (link_fd >= 0 || err != -EBADF) {
392		if (link_fd >= 0)
393			close(link_fd);
394		close(prog_fd);
395		return 0;
396	}
397
398	/* Initial multi-uprobe support in kernel didn't handle PID filtering
399	 * correctly (it was doing thread filtering, not process filtering).
400	 * So now we'll detect if PID filtering logic was fixed, and, if not,
 401	 * we'll pretend multi-uprobes are not supported.
402	 * Multi-uprobes are used in USDT attachment logic, and we need to be
403	 * conservative here, because multi-uprobe selection happens early at
404	 * load time, while the use of PID filtering is known late at
405	 * attachment time, at which point it's too late to undo multi-uprobe
406	 * selection.
407	 *
408	 * Creating uprobe with pid == -1 for (invalid) '/' binary will fail
409	 * early with -EINVAL on kernels with fixed PID filtering logic;
410	 * otherwise -ESRCH would be returned if passed correct binary path
 411	 * (but we'll just get -EBADF, of course).
412	 */
413	link_opts.uprobe_multi.pid = -1; /* invalid PID */
414	link_opts.uprobe_multi.path = "/"; /* invalid path */
415	link_opts.uprobe_multi.offsets = &offset;
416	link_opts.uprobe_multi.cnt = 1;
417
418	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
419	err = -errno; /* close() can clobber errno */
420
421	if (link_fd >= 0)
422		close(link_fd);
423	close(prog_fd);
424
425	return link_fd < 0 && err == -EINVAL;
426}
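
/* Hedged sketch of the user-facing API this probe gates: with multi-uprobe
 * link support, one link attaches a program to many user-space functions at
 * once via a glob pattern (the libc path and pattern below are illustrative):
 */
static struct bpf_link *attach_malloc_uprobes(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);

	/* attach to every "malloc*" function in this process's libc */
	return bpf_program__attach_uprobe_multi(prog, getpid(),
						"/lib/x86_64-linux-gnu/libc.so.6",
						"malloc*", &opts);
}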
427
428static int probe_kern_bpf_cookie(int token_fd)
429{
430	struct bpf_insn insns[] = {
431		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
432		BPF_EXIT_INSN(),
433	};
434	LIBBPF_OPTS(bpf_prog_load_opts, opts,
435		.token_fd = token_fd,
436		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
437	);
438	int ret, insn_cnt = ARRAY_SIZE(insns);
439
440	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
441	return probe_fd(ret);
442}
443
444static int probe_kern_btf_enum64(int token_fd)
445{
446	static const char strs[] = "\0enum64";
447	__u32 types[] = {
448		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
449	};
450
451	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
452					     strs, sizeof(strs), token_fd));
453}
454
455static int probe_kern_arg_ctx_tag(int token_fd)
456{
457	static const char strs[] = "\0a\0b\0arg:ctx\0";
458	const __u32 types[] = {
459		/* [1] INT */
460		BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
461		/* [2] PTR -> VOID */
462		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
463		/* [3] FUNC_PROTO `int(void *a)` */
464		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
465		BTF_PARAM_ENC(1 /* "a" */, 2),
466		/* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
467		BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
468		/* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
469		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
470		BTF_PARAM_ENC(3 /* "b" */, 2),
471		/* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
472		BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
473		/* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
474		BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
475	};
476	const struct bpf_insn insns[] = {
477		/* main prog */
478		BPF_CALL_REL(+1),
479		BPF_EXIT_INSN(),
480		/* global subprog */
481		BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
482		BPF_EXIT_INSN(),
483	};
484	const struct bpf_func_info_min func_infos[] = {
485		{ 0, 4 }, /* main prog -> FUNC 'a' */
486		{ 2, 6 }, /* subprog -> FUNC 'b' */
487	};
488	LIBBPF_OPTS(bpf_prog_load_opts, opts,
489		.token_fd = token_fd,
490		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
491	);
492	int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
493
494	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
495	if (btf_fd < 0)
496		return 0;
497
498	opts.prog_btf_fd = btf_fd;
499	opts.func_info = &func_infos;
500	opts.func_info_cnt = ARRAY_SIZE(func_infos);
501	opts.func_info_rec_size = sizeof(func_infos[0]);
502
503	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
504				"GPL", insns, insn_cnt, &opts);
505	close(btf_fd);
506
507	return probe_fd(prog_fd);
508}
509
510typedef int (*feature_probe_fn)(int /* token_fd */);
511
512static struct kern_feature_cache feature_cache;
513
514static struct kern_feature_desc {
515	const char *desc;
516	feature_probe_fn probe;
517} feature_probes[__FEAT_CNT] = {
518	[FEAT_PROG_NAME] = {
519		"BPF program name", probe_kern_prog_name,
520	},
521	[FEAT_GLOBAL_DATA] = {
522		"global variables", probe_kern_global_data,
523	},
524	[FEAT_BTF] = {
525		"minimal BTF", probe_kern_btf,
526	},
527	[FEAT_BTF_FUNC] = {
528		"BTF functions", probe_kern_btf_func,
529	},
530	[FEAT_BTF_GLOBAL_FUNC] = {
531		"BTF global function", probe_kern_btf_func_global,
532	},
533	[FEAT_BTF_DATASEC] = {
534		"BTF data section and variable", probe_kern_btf_datasec,
535	},
536	[FEAT_ARRAY_MMAP] = {
537		"ARRAY map mmap()", probe_kern_array_mmap,
538	},
539	[FEAT_EXP_ATTACH_TYPE] = {
540		"BPF_PROG_LOAD expected_attach_type attribute",
541		probe_kern_exp_attach_type,
542	},
543	[FEAT_PROBE_READ_KERN] = {
544		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
545	},
546	[FEAT_PROG_BIND_MAP] = {
547		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
548	},
549	[FEAT_MODULE_BTF] = {
550		"module BTF support", probe_module_btf,
551	},
552	[FEAT_BTF_FLOAT] = {
553		"BTF_KIND_FLOAT support", probe_kern_btf_float,
554	},
555	[FEAT_PERF_LINK] = {
556		"BPF perf link support", probe_perf_link,
557	},
558	[FEAT_BTF_DECL_TAG] = {
559		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
560	},
561	[FEAT_BTF_TYPE_TAG] = {
562		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
563	},
564	[FEAT_MEMCG_ACCOUNT] = {
565		"memcg-based memory accounting", probe_memcg_account,
566	},
567	[FEAT_BPF_COOKIE] = {
568		"BPF cookie support", probe_kern_bpf_cookie,
569	},
570	[FEAT_BTF_ENUM64] = {
571		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
572	},
573	[FEAT_SYSCALL_WRAPPER] = {
574		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
575	},
576	[FEAT_UPROBE_MULTI_LINK] = {
577		"BPF multi-uprobe link support", probe_uprobe_multi_link,
578	},
579	[FEAT_ARG_CTX_TAG] = {
580		"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
581	},
582	[FEAT_BTF_QMARK_DATASEC] = {
583		"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
584	},
585};
586
587bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
588{
589	struct kern_feature_desc *feat = &feature_probes[feat_id];
590	int ret;
591
592	/* assume global feature cache, unless custom one is provided */
593	if (!cache)
594		cache = &feature_cache;
595
596	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
597		ret = feat->probe(cache->token_fd);
598		if (ret > 0) {
599			WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
600		} else if (ret == 0) {
601			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
602		} else {
603			pr_warn("Detection of kernel %s support failed: %s\n",
604				feat->desc, errstr(ret));
605			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
606		}
607	}
608
609	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
610}
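
All of these probes thread a token_fd into the map, program and BTF load
commands (via BPF_F_TOKEN_FD). Such a token comes from a BPF filesystem
instance mounted with delegation options; a hedged sketch of obtaining one with
the public API (the mount point and open flags are illustrative, error handling
trimmed):

static int get_bpf_token_fd(void)
{
	int bpffs_fd, token_fd;

	/* bpffs assumed mounted with delegate_cmds/maps/progs/attachs options */
	bpffs_fd = open("/sys/fs/bpf", O_RDONLY);	/* needs <fcntl.h> */
	if (bpffs_fd < 0)
		return -1;

	token_fd = bpf_token_create(bpffs_fd, NULL);	/* declared in bpf.h */
	close(bpffs_fd);
	return token_fd;	/* pass as token_fd to the probes above, or < 0 on error */
}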