/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Common BPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#ifndef __LIBBPF_BPF_H
#define __LIBBPF_BPF_H

#include <linux/bpf.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "libbpf_common.h"
#include "libbpf_legacy.h"

#ifdef __cplusplus
extern "C" {
#endif

int libbpf_set_memlock_rlim(size_t memlock_bytes);

struct bpf_map_create_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */

        __u32 btf_fd;
        __u32 btf_key_type_id;
        __u32 btf_value_type_id;
        __u32 btf_vmlinux_value_type_id;

        __u32 inner_map_fd;
        __u32 map_flags;
        __u64 map_extra;

        __u32 numa_node;
        __u32 map_ifindex;
};
#define bpf_map_create_opts__last_field map_ifindex

LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
                              const char *map_name,
                              __u32 key_size,
                              __u32 value_size,
                              __u32 max_entries,
                              const struct bpf_map_create_opts *opts);

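/*
 * Usage sketch (illustrative, not part of this header's API): creating a small
 * BPF_MAP_TYPE_HASH map with bpf_map_create(). The map name, key/value sizes,
 * max_entries and BPF_F_NO_PREALLOC flag below are arbitrary example choices.
 *
 *      LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *      int map_fd;
 *
 *      map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
 *                              sizeof(__u32), sizeof(__u64), 1024, &opts);
 *      // map_fd < 0 means failure, with errno set to the error code
 */
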
struct bpf_prog_load_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */

        /* libbpf can retry the BPF_PROG_LOAD command if the bpf() syscall
         * returns -EAGAIN. This field determines how many attempts libbpf is
         * allowed to make. If not specified, libbpf uses a default of 5 attempts.
         */
        int attempts;

        enum bpf_attach_type expected_attach_type;
        __u32 prog_btf_fd;
        __u32 prog_flags;
        __u32 prog_ifindex;
        __u32 kern_version;

        __u32 attach_btf_id;
        __u32 attach_prog_fd;
        __u32 attach_btf_obj_fd;

        const int *fd_array;

        /* .BTF.ext func info data */
        const void *func_info;
        __u32 func_info_cnt;
        __u32 func_info_rec_size;

        /* .BTF.ext line info data */
        const void *line_info;
        __u32 line_info_cnt;
        __u32 line_info_rec_size;

        /* verifier log options */
        __u32 log_level;
        __u32 log_size;
        char *log_buf;
        /* output: actual total log contents size (including terminating zero).
         * It can be either larger than the original log_size (if the log was
         * truncated) or smaller (if the log buffer wasn't filled completely).
         * If the kernel doesn't support this feature, log_size is left unchanged.
         */
        __u32 log_true_size;
        size_t :0;
};
#define bpf_prog_load_opts__last_field log_true_size

LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
                             const char *prog_name, const char *license,
                             const struct bpf_insn *insns, size_t insn_cnt,
                             struct bpf_prog_load_opts *opts);

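/*
 * Usage sketch (illustrative, not part of this header's API): loading a
 * minimal "r0 = 0; exit" socket filter program with bpf_prog_load(), using
 * log_buf/log_size to capture the verifier log. The program name, log size
 * and raw instruction encoding are example choices made here.
 *
 *      const struct bpf_insn insns[] = {
 *              { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 }, // r0 = 0
 *              { .code = BPF_JMP | BPF_EXIT },                                // exit
 *      };
 *      char log[4096];
 *      LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *                  .log_buf = log,
 *                  .log_size = sizeof(log),
 *                  .log_level = 1);
 *      int prog_fd;
 *
 *      prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "example_prog", "GPL",
 *                              insns, sizeof(insns) / sizeof(insns[0]), &opts);
 *      // on failure prog_fd < 0, errno is set and log holds the verifier output
 */
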
/* Flags to direct loading requirements */
#define MAPS_RELAX_COMPAT 0x01

/* Recommended log buffer size */
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */

struct bpf_btf_load_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */

        /* kernel log options */
        char *log_buf;
        __u32 log_level;
        __u32 log_size;
        /* output: actual total log contents size (including terminating zero).
         * It can be either larger than the original log_size (if the log was
         * truncated) or smaller (if the log buffer wasn't filled completely).
         * If the kernel doesn't support this feature, log_size is left unchanged.
         */
        __u32 log_true_size;
        size_t :0;
};
#define bpf_btf_load_opts__last_field log_true_size

LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
                            struct bpf_btf_load_opts *opts);

LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
                                   __u64 flags);

LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value);
LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value,
                                         __u64 flags);
LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
                                              void *value);
LIBBPF_API int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key,
                                                    void *value, __u64 flags);
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
LIBBPF_API int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags);
LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
LIBBPF_API int bpf_map_freeze(int fd);

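/*
 * Usage sketch (illustrative, not part of this header's API): writing and
 * reading back a single element of a map with __u32 keys and __u64 values,
 * such as the one created in the bpf_map_create() example above (map_fd is
 * assumed to come from the calling code).
 *
 *      __u32 key = 1;
 *      __u64 value = 42, readback = 0;
 *      int err;
 *
 *      err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 *      if (!err)
 *              err = bpf_map_lookup_elem(map_fd, &key, &readback);
 *      // on success readback == 42; on failure err is negative and errno is set
 */
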
struct bpf_map_batch_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u64 elem_flags;
        __u64 flags;
};
#define bpf_map_batch_opts__last_field flags

/**
 * @brief **bpf_map_delete_batch()** allows for batch deletion of multiple
 * elements in a BPF map.
 *
 * @param fd BPF map file descriptor
 * @param keys pointer to an array of *count* keys
 * @param count input and output parameter; on input **count** represents the
 * number of elements in the map to delete in batch;
 * on output, if a non-**EFAULT** error is returned, **count** represents the
 * number of elements that were actually deleted, which may be smaller than
 * the input **count** value.
 * If **EFAULT** is returned, **count** should not be trusted to be correct.
 * @param opts options for configuring the way the batch deletion works
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys,
                                    __u32 *count,
                                    const struct bpf_map_batch_opts *opts);

/**
 * @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
 *
 * The parameter *in_batch* is the address of the first element in the batch to
 * read. *out_batch* is an output parameter that should be passed as *in_batch*
 * to subsequent calls to **bpf_map_lookup_batch()**. NULL can be passed for
 * *in_batch* to indicate that the batched lookup starts from the beginning of
 * the map.
 *
 * *keys* and *values* are output parameters which must point to memory large
 * enough to hold *count* items based on the key and value size of the map *fd*.
 * The *keys* buffer must be at least *key_size* * *count* bytes large and the
 * *values* buffer at least *value_size* * *count* bytes large.
 *
 * @param fd BPF map file descriptor
 * @param in_batch address of the first element in the batch to read; pass NULL
 * to indicate that the batched lookup starts from the beginning of the map
 * @param out_batch output parameter that should be passed to the next call as
 * *in_batch*
 * @param keys pointer to an array large enough for *count* keys
 * @param values pointer to an array large enough for *count* values
 * @param count input and output parameter; on input it's the number of elements
 * in the map to read in batch; on output it's the number of elements that were
 * successfully read.
 * If a non-**EFAULT** error is returned, **count** will be set to the number of
 * elements that were read before the error occurred.
 * If **EFAULT** is returned, **count** should not be trusted to be correct.
 * @param opts options for configuring the way the batch lookup works
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
                                    void *keys, void *values, __u32 *count,
                                    const struct bpf_map_batch_opts *opts);

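/*
 * Usage sketch (illustrative, not part of this header's API): reading a
 * __u32/__u64 hash map in batches of up to 64 elements. Treating the batch
 * token as a single __u32 is an assumption that holds for hash maps; callers
 * should treat *in_batch*/*out_batch* as opaque. map_fd comes from the caller.
 *
 *      LIBBPF_OPTS(bpf_map_batch_opts, bopts);
 *      __u32 batch = 0, count;
 *      __u32 keys[64];
 *      __u64 values[64];
 *      bool first = true;
 *      int err;
 *
 *      do {
 *              count = 64;
 *              err = bpf_map_lookup_batch(map_fd, first ? NULL : &batch, &batch,
 *                                         keys, values, &count, &bopts);
 *              first = false;
 *              // process keys[0..count-1] / values[0..count-1] here; on the
 *              // final call err is -ENOENT and count covers the remaining tail
 *      } while (!err);
 */
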
/**
 * @brief **bpf_map_lookup_and_delete_batch()** allows for batch lookup and deletion
 * of BPF map elements where each element is deleted after being retrieved.
 *
 * @param fd BPF map file descriptor
 * @param in_batch address of the first element in the batch to read; pass NULL
 * to indicate that the batched lookup and deletion starts from the beginning
 * of the map
 * @param out_batch output parameter that should be passed to the next call as
 * *in_batch*
 * @param keys pointer to an array of *count* keys
 * @param values pointer to an array large enough for *count* values
 * @param count input and output parameter; on input it's the number of elements
 * in the map to read and delete in batch; on output it represents the number of
 * elements that were successfully read and deleted.
 * If a non-**EFAULT** error code is returned and the output **count** value is
 * not equal to the input **count** value, up to **count** elements may have
 * been deleted.
 * If **EFAULT** is returned, up to *count* elements may have been deleted without
 * being returned via the *keys* and *values* output parameters.
 * @param opts options for configuring the way the batch lookup and delete works
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch,
                                               void *out_batch, void *keys,
                                               void *values, __u32 *count,
                                               const struct bpf_map_batch_opts *opts);

/**
 * @brief **bpf_map_update_batch()** updates multiple elements in a map
 * by specifying keys and their corresponding values.
 *
 * The *keys* and *values* parameters must point to memory large enough
 * to hold *count* items based on the key and value size of the map.
 *
 * The *opts* parameter can be used to control how *bpf_map_update_batch()*
 * should handle keys that either do or do not already exist in the map.
 * In particular the *flags* parameter of *bpf_map_batch_opts* can be
 * one of the following:
 *
 * **BPF_ANY**
 *    Create new elements or update existing.
 *
 * **BPF_NOEXIST**
 *    Create new elements only if they do not exist.
 *
 * **BPF_EXIST**
 *    Update existing elements.
 *
 * **BPF_F_LOCK**
 *    Update spin_lock-ed map elements. This must be
 *    specified if the map value contains a spinlock.
 *
 * Note that *count* is an input and output parameter, where on output it
 * represents how many elements were successfully updated. Also note that if
 * **EFAULT** is returned, *count* should not be trusted to be correct.
 *
 * @param fd BPF map file descriptor
 * @param keys pointer to an array of *count* keys
 * @param values pointer to an array of *count* values
 * @param count input and output parameter; on input it's the number of elements
 * in the map to update in batch; on output, if a non-**EFAULT** error is
 * returned, **count** represents the number of elements that were actually
 * updated, which may be smaller than the input **count** value.
 * If **EFAULT** is returned, **count** should not be trusted to be correct.
 * @param opts options for configuring the way the batch update works
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values,
                                    __u32 *count,
                                    const struct bpf_map_batch_opts *opts);

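/*
 * Usage sketch (illustrative, not part of this header's API): inserting or
 * updating three __u32/__u64 pairs in a single syscall with default options
 * (create-or-update semantics). The key/value arrays are example data and
 * map_fd is assumed to come from the calling code.
 *
 *      LIBBPF_OPTS(bpf_map_batch_opts, bopts);
 *      __u32 keys[] = { 1, 2, 3 };
 *      __u64 values[] = { 10, 20, 30 };
 *      __u32 count = 3;
 *      int err;
 *
 *      err = bpf_map_update_batch(map_fd, keys, values, &count, &bopts);
 *      // on error count reports how many elements were actually updated
 */
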
struct bpf_obj_pin_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */

        __u32 file_flags;
        int path_fd;

        size_t :0;
};
#define bpf_obj_pin_opts__last_field path_fd

LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_pin_opts(int fd, const char *pathname,
                                const struct bpf_obj_pin_opts *opts);

struct bpf_obj_get_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */

        __u32 file_flags;
        int path_fd;

        size_t :0;
};
#define bpf_obj_get_opts__last_field path_fd

LIBBPF_API int bpf_obj_get(const char *pathname);
LIBBPF_API int bpf_obj_get_opts(const char *pathname,
                                const struct bpf_obj_get_opts *opts);

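/*
 * Usage sketch (illustrative, not part of this header's API): pinning a map FD
 * in bpffs and re-opening it later by path. The pin path is an arbitrary
 * example and assumes a bpffs mount at /sys/fs/bpf; map_fd comes from the caller.
 *
 *      int err = bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map");
 *
 *      if (!err) {
 *              int same_map_fd = bpf_obj_get("/sys/fs/bpf/example_map");
 *              // same_map_fd refers to the same map object as map_fd
 *      }
 */
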
LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
                               enum bpf_attach_type type, unsigned int flags);
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
                                enum bpf_attach_type type);

struct bpf_prog_attach_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u32 flags;
        union {
                int replace_prog_fd;
                int replace_fd;
        };
        int relative_fd;
        __u32 relative_id;
        __u64 expected_revision;
        size_t :0;
};
#define bpf_prog_attach_opts__last_field expected_revision

struct bpf_prog_detach_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u32 flags;
        int relative_fd;
        __u32 relative_id;
        __u64 expected_revision;
        size_t :0;
};
#define bpf_prog_detach_opts__last_field expected_revision

/**
 * @brief **bpf_prog_attach_opts()** attaches the BPF program corresponding to
 * *prog_fd* to a *target* which can represent a file descriptor or netdevice
 * ifindex.
 *
 * @param prog_fd BPF program file descriptor
 * @param target attach location file descriptor or ifindex
 * @param type attach type for the BPF program
 * @param opts options for configuring the attachment
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int target,
                                    enum bpf_attach_type type,
                                    const struct bpf_prog_attach_opts *opts);

/**
 * @brief **bpf_prog_detach_opts()** detaches the BPF program corresponding to
 * *prog_fd* from a *target* which can represent a file descriptor or netdevice
 * ifindex.
 *
 * @param prog_fd BPF program file descriptor
 * @param target detach location file descriptor or ifindex
 * @param type detach type for the BPF program
 * @param opts options for configuring the detachment
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_prog_detach_opts(int prog_fd, int target,
                                    enum bpf_attach_type type,
                                    const struct bpf_prog_detach_opts *opts);

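/*
 * Usage sketch (illustrative, not part of this header's API): atomically
 * replacing an already attached cgroup program. Assumes cgroup_fd is an open
 * cgroup directory, old_prog_fd/new_prog_fd are loaded cgroup programs and the
 * original attachment used BPF_F_ALLOW_MULTI; these names come from the caller.
 *
 *      LIBBPF_OPTS(bpf_prog_attach_opts, opts,
 *                  .flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE,
 *                  .replace_prog_fd = old_prog_fd);
 *      int err;
 *
 *      err = bpf_prog_attach_opts(new_prog_fd, cgroup_fd,
 *                                 BPF_CGROUP_INET_INGRESS, &opts);
 *      // on failure err is negative and errno is set (for example when
 *      // old_prog_fd is not currently attached)
 */
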
union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */
struct bpf_link_create_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u32 flags;
        union bpf_iter_link_info *iter_info;
        __u32 iter_info_len;
        __u32 target_btf_id;
        union {
                struct {
                        __u64 bpf_cookie;
                } perf_event;
                struct {
                        __u32 flags;
                        __u32 cnt;
                        const char **syms;
                        const unsigned long *addrs;
                        const __u64 *cookies;
                } kprobe_multi;
                struct {
                        __u32 flags;
                        __u32 cnt;
                        const char *path;
                        const unsigned long *offsets;
                        const unsigned long *ref_ctr_offsets;
                        const __u64 *cookies;
                        __u32 pid;
                } uprobe_multi;
                struct {
                        __u64 cookie;
                } tracing;
                struct {
                        __u32 pf;
                        __u32 hooknum;
                        __s32 priority;
                        __u32 flags;
                } netfilter;
                struct {
                        __u32 relative_fd;
                        __u32 relative_id;
                        __u64 expected_revision;
                } tcx;
                struct {
                        __u32 relative_fd;
                        __u32 relative_id;
                        __u64 expected_revision;
                } netkit;
        };
        size_t :0;
};
#define bpf_link_create_opts__last_field uprobe_multi.pid

LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
                               enum bpf_attach_type attach_type,
                               const struct bpf_link_create_opts *opts);

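/*
 * Usage sketch (illustrative, not part of this header's API): creating a
 * link-based cgroup attachment instead of using the legacy bpf_prog_attach()
 * API. prog_fd (a loaded cgroup program) and cgroup_fd (an open cgroup
 * directory) are assumed to come from the calling code.
 *
 *      int link_fd = bpf_link_create(prog_fd, cgroup_fd,
 *                                    BPF_CGROUP_INET_INGRESS, NULL);
 *
 *      // link_fd < 0 means failure (errno is set); the attachment is torn
 *      // down automatically once link_fd (and any pin of it) is closed
 */
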
LIBBPF_API int bpf_link_detach(int link_fd);

struct bpf_link_update_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u32 flags;       /* extra flags */
        __u32 old_prog_fd; /* expected old program FD */
        __u32 old_map_fd;  /* expected old map FD */
};
#define bpf_link_update_opts__last_field old_map_fd

LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd,
                               const struct bpf_link_update_opts *opts);

LIBBPF_API int bpf_iter_create(int link_fd);

struct bpf_prog_test_run_attr {
        int prog_fd;
        int repeat;
        const void *data_in;
        __u32 data_size_in;
        void *data_out;      /* optional */
        __u32 data_size_out; /* in: max length of data_out
                              * out: length of data_out */
        __u32 retval;        /* out: return code of the BPF program */
        __u32 duration;      /* out: average per repetition in ns */
        const void *ctx_in;  /* optional */
        __u32 ctx_size_in;
        void *ctx_out;       /* optional */
        __u32 ctx_size_out;  /* in: max length of ctx_out
                              * out: length of ctx_out */
};

LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id);

struct bpf_get_fd_by_id_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u32 open_flags; /* permissions requested for the operation on fd */
        size_t :0;
};
#define bpf_get_fd_by_id_opts__last_field open_flags

LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id,
                                          const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_map_get_fd_by_id_opts(__u32 id,
                                         const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_btf_get_fd_by_id_opts(__u32 id,
                                         const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id,
                                          const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);

/**
 * @brief **bpf_prog_get_info_by_fd()** obtains information about the BPF
 * program corresponding to *prog_fd*.
 *
 * Populates up to *info_len* bytes of *info* and updates *info_len* with the
 * actual number of bytes written to *info*.
 *
 * @param prog_fd BPF program file descriptor
 * @param info pointer to **struct bpf_prog_info** that will be populated with
 * BPF program information
 * @param info_len pointer to the size of *info*; on success updated with the
 * number of bytes written to *info*
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len);

/**
 * @brief **bpf_map_get_info_by_fd()** obtains information about the BPF
 * map corresponding to *map_fd*.
 *
 * Populates up to *info_len* bytes of *info* and updates *info_len* with the
 * actual number of bytes written to *info*.
 *
 * @param map_fd BPF map file descriptor
 * @param info pointer to **struct bpf_map_info** that will be populated with
 * BPF map information
 * @param info_len pointer to the size of *info*; on success updated with the
 * number of bytes written to *info*
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);

/**
 * @brief **bpf_btf_get_info_by_fd()** obtains information about the
 * BTF object corresponding to *btf_fd*.
 *
 * Populates up to *info_len* bytes of *info* and updates *info_len* with the
 * actual number of bytes written to *info*.
 *
 * @param btf_fd BTF object file descriptor
 * @param info pointer to **struct bpf_btf_info** that will be populated with
 * BTF object information
 * @param info_len pointer to the size of *info*; on success updated with the
 * number of bytes written to *info*
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len);

/**
 * @brief **bpf_link_get_info_by_fd()** obtains information about the BPF
 * link corresponding to *link_fd*.
 *
 * Populates up to *info_len* bytes of *info* and updates *info_len* with the
 * actual number of bytes written to *info*.
 *
 * @param link_fd BPF link file descriptor
 * @param info pointer to **struct bpf_link_info** that will be populated with
 * BPF link information
 * @param info_len pointer to the size of *info*; on success updated with the
 * number of bytes written to *info*
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len);

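/*
 * Usage sketch (illustrative, not part of this header's API): walking all
 * loaded programs on the system and printing their names. Typically requires
 * CAP_SYS_ADMIN; assumes <stdio.h> and <unistd.h> in the calling code.
 *
 *      __u32 id = 0;
 *
 *      while (!bpf_prog_get_next_id(id, &id)) {
 *              struct bpf_prog_info info = {};
 *              __u32 info_len = sizeof(info);
 *              int fd = bpf_prog_get_fd_by_id(id);
 *
 *              if (fd < 0)
 *                      continue; // the program may have been unloaded meanwhile
 *              if (!bpf_prog_get_info_by_fd(fd, &info, &info_len))
 *                      printf("prog id %u: %s\n", id, info.name);
 *              close(fd);
 *      }
 */
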
struct bpf_prog_query_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u32 query_flags;
        __u32 attach_flags; /* output argument */
        __u32 *prog_ids;
        union {
                /* input+output argument */
                __u32 prog_cnt;
                __u32 count;
        };
        __u32 *prog_attach_flags;
        __u32 *link_ids;
        __u32 *link_attach_flags;
        __u64 revision;
        size_t :0;
};
#define bpf_prog_query_opts__last_field revision

/**
 * @brief **bpf_prog_query_opts()** queries the BPF programs and BPF links
 * which are attached to *target* which can represent a file descriptor or
 * netdevice ifindex.
 *
 * @param target query location file descriptor or ifindex
 * @param type attach type for the BPF program
 * @param opts options for configuring the query
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_prog_query_opts(int target, enum bpf_attach_type type,
                                   struct bpf_prog_query_opts *opts);
LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
                              __u32 query_flags, __u32 *attach_flags,
                              __u32 *prog_ids, __u32 *prog_cnt);

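/*
 * Usage sketch (illustrative, not part of this header's API): listing the IDs
 * of programs attached to a cgroup for one attach type. The array size is an
 * example choice and cgroup_fd is assumed to come from the calling code.
 *
 *      __u32 ids[16];
 *      LIBBPF_OPTS(bpf_prog_query_opts, qopts,
 *                  .prog_ids = ids,
 *                  .prog_cnt = 16);
 *      int err;
 *
 *      err = bpf_prog_query_opts(cgroup_fd, BPF_CGROUP_INET_INGRESS, &qopts);
 *      // on success qopts.prog_cnt holds how many entries of ids[] were filled
 */
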
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
                                 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
                                 __u64 *probe_offset, __u64 *probe_addr);

#ifdef __cplusplus
/* forward-declaring enums in C++ isn't compatible with pure C enums, so
 * instead define bpf_enable_stats() as accepting int as an input
 */
LIBBPF_API int bpf_enable_stats(int type);
#else
enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
#endif

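/*
 * Usage sketch (illustrative, not part of this header's API): enabling
 * kernel-side run-time statistics collection so that run_time_ns/run_cnt get
 * populated in struct bpf_prog_info.
 *
 *      int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
 *
 *      // stats_fd < 0 means failure (requires CAP_SYS_ADMIN); statistics are
 *      // collected for as long as stats_fd is kept open
 */
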
struct bpf_prog_bind_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u32 flags;
};
#define bpf_prog_bind_opts__last_field flags

LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
                                 const struct bpf_prog_bind_opts *opts);

struct bpf_test_run_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        const void *data_in; /* optional */
        void *data_out;      /* optional */
        __u32 data_size_in;
        __u32 data_size_out; /* in: max length of data_out
                              * out: length of data_out
                              */
        const void *ctx_in;  /* optional */
        void *ctx_out;       /* optional */
        __u32 ctx_size_in;
        __u32 ctx_size_out;  /* in: max length of ctx_out
                              * out: length of ctx_out
                              */
        __u32 retval;        /* out: return code of the BPF program */
        int repeat;
        __u32 duration;      /* out: average per repetition in ns */
        __u32 flags;
        __u32 cpu;
        __u32 batch_size;
};
#define bpf_test_run_opts__last_field batch_size

LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
                                      struct bpf_test_run_opts *opts);

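/*
 * Usage sketch (illustrative, not part of this header's API): running the
 * socket filter program from the bpf_prog_load() example once against a dummy
 * packet and reading its return value. The buffer sizes are example choices
 * and prog_fd comes from the calling code.
 *
 *      char pkt_in[64] = {}; // a real caller would build an actual packet here
 *      char pkt_out[64];
 *      LIBBPF_OPTS(bpf_test_run_opts, topts,
 *                  .data_in = pkt_in,
 *                  .data_size_in = sizeof(pkt_in),
 *                  .data_out = pkt_out,
 *                  .data_size_out = sizeof(pkt_out),
 *                  .repeat = 1);
 *
 *      int err = bpf_prog_test_run_opts(prog_fd, &topts);
 *      // on success (err == 0), topts.retval holds the program's return code
 *      // and topts.data_size_out the length of the output data
 */
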
#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_BPF_H */