/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#ifndef __LIBBPF_BPF_H
#define __LIBBPF_BPF_H

#include <linux/bpf.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "libbpf_common.h"
#include "libbpf_legacy.h"

#ifdef __cplusplus
extern "C" {
#endif

int libbpf_set_memlock_rlim(size_t memlock_bytes);

struct bpf_map_create_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */

	__u32 btf_fd;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;

	__u32 inner_map_fd;
	__u32 map_flags;
	__u64 map_extra;

	__u32 numa_node;
	__u32 map_ifindex;
};
#define bpf_map_create_opts__last_field map_ifindex

LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
			      const char *map_name,
			      __u32 key_size,
			      __u32 value_size,
			      __u32 max_entries,
			      const struct bpf_map_create_opts *opts);

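/* Usage sketch (illustrative only, not part of this header's API surface):
 * create a small BPF_MAP_TYPE_HASH map, passing extra options through
 * bpf_map_create_opts. The map name "example_map" and the key/value sizes are
 * arbitrary choices for the example. LIBBPF_OPTS() comes from libbpf_common.h
 * (included above) and zero-initializes the opts struct, including sz.
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		.map_flags = BPF_F_NO_PREALLOC,
 *	);
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
 *				sizeof(__u32), sizeof(__u64), 1024, &opts);
 *	if (map_fd < 0)
 *		... handle error, errno is also set ...
 */
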
struct bpf_prog_load_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */

	/* libbpf can retry BPF_PROG_LOAD command if bpf() syscall returns
	 * -EAGAIN. This field determines how many attempts libbpf has to
	 * make. If not specified, libbpf will use the default value of 5.
	 */
	int attempts;

	enum bpf_attach_type expected_attach_type;
	__u32 prog_btf_fd;
	__u32 prog_flags;
	__u32 prog_ifindex;
	__u32 kern_version;

	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	__u32 attach_btf_obj_fd;

	const int *fd_array;

	/* .BTF.ext func info data */
	const void *func_info;
	__u32 func_info_cnt;
	__u32 func_info_rec_size;

	/* .BTF.ext line info data */
	const void *line_info;
	__u32 line_info_cnt;
	__u32 line_info_rec_size;

	/* verifier log options */
	__u32 log_level;
	__u32 log_size;
	char *log_buf;
};
#define bpf_prog_load_opts__last_field log_buf

LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
			     const char *prog_name, const char *license,
			     const struct bpf_insn *insns, size_t insn_cnt,
			     const struct bpf_prog_load_opts *opts);

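/* Usage sketch (illustrative only): load a trivial two-instruction program
 * that immediately returns 0, capturing the verifier log into a caller-owned
 * buffer. The program name, log buffer size, and program type are arbitrary
 * choices for the example.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	char log[4096];
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *		.log_buf = log,
 *		.log_size = sizeof(log),
 *		.log_level = 1,
 *	);
 *	int prog_fd;
 *
 *	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "ret0", "GPL",
 *				insns, sizeof(insns) / sizeof(insns[0]), &opts);
 *	if (prog_fd < 0)
 *		... inspect log[] for the verifier's explanation ...
 */
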
/* Flags to direct loading requirements */
#define MAPS_RELAX_COMPAT	0x01

/* Recommended log buffer size */
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */

struct bpf_btf_load_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */

	/* kernel log options */
	char *log_buf;
	__u32 log_level;
	__u32 log_size;
};
#define bpf_btf_load_opts__last_field log_size

LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
			    const struct bpf_btf_load_opts *opts);

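/* Usage sketch (illustrative only): load raw BTF bytes into the kernel. Where
 * the bytes come from is up to the caller; obtaining them from a struct btf
 * via btf__raw_data() (declared in btf.h, not in this header) is one common
 * source and is only an assumption here.
 *
 *	const void *raw;
 *	__u32 raw_size;
 *	int btf_fd;
 *
 *	raw = btf__raw_data(btf, &raw_size);
 *	btf_fd = bpf_btf_load(raw, raw_size, NULL);
 *	if (btf_fd < 0)
 *		... handle error; pass a bpf_btf_load_opts with log_buf set
 *		    to capture the kernel's BTF verification log ...
 */
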
LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
				   __u64 flags);

LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value);
LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value,
					 __u64 flags);
LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
					      void *value);
LIBBPF_API int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key,
						     void *value, __u64 flags);
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
LIBBPF_API int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags);
LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
LIBBPF_API int bpf_map_freeze(int fd);

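/* Usage sketch (illustrative only): basic single-element operations against a
 * map whose key is __u32 and whose value is __u64, matching the sizes used in
 * the bpf_map_create() example above.
 *
 *	__u32 key = 1;
 *	__u64 value = 42;
 *
 *	if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY))
 *		... handle error ...
 *	if (bpf_map_lookup_elem(map_fd, &key, &value))
 *		... key not found or other error ...
 *	if (bpf_map_delete_elem(map_fd, &key))
 *		... handle error ...
 *
 * Full iteration uses bpf_map_get_next_key() in a loop: start with a NULL key
 * pointer to get the first key, then keep feeding back next_key until the
 * call fails with -ENOENT.
 */
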
struct bpf_map_batch_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u64 elem_flags;
	__u64 flags;
};
#define bpf_map_batch_opts__last_field flags


/**
 * @brief **bpf_map_delete_batch()** allows for batch deletion of multiple
 * elements in a BPF map.
 *
 * @param fd BPF map file descriptor
 * @param keys pointer to an array of *count* keys
 * @param count input and output parameter; on input **count** represents the
 * number of elements in the map to delete in batch;
 * on output, if a non-**EFAULT** error is returned, **count** represents the
 * number of elements that were actually deleted, which may be less than the
 * input **count** value.
 * If **EFAULT** is returned, **count** should not be trusted to be correct.
 * @param opts options for configuring the way the batch deletion works
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys,
				    __u32 *count,
				    const struct bpf_map_batch_opts *opts);

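/* Usage sketch (illustrative only): delete a batch of keys in one syscall.
 * The keys array and the nr_keys count are caller-provided values assumed for
 * the example.
 *
 *	LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *	__u32 count = nr_keys;
 *	int err;
 *
 *	err = bpf_map_delete_batch(map_fd, keys, &count, &opts);
 *	if (err)
 *		... on a partial failure, count tells how many keys were
 *		    deleted (unless the error was -EFAULT) ...
 */
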
/**
 * @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
 *
 * The parameter *in_batch* is the address of the first element in the batch to
 * read. *out_batch* is an output parameter that should be passed as *in_batch*
 * to subsequent calls to **bpf_map_lookup_batch()**. NULL can be passed for
 * *in_batch* to indicate that the batched lookup starts from the beginning of
 * the map.
 *
 * The *keys* and *values* are output parameters which must point to memory
 * large enough to hold *count* items based on the key and value size of the
 * map *map_fd*. The *keys* buffer must be at least *key_size* * *count* bytes;
 * the *values* buffer must be at least *value_size* * *count* bytes.
 *
 * @param fd BPF map file descriptor
 * @param in_batch address of the first element in batch to read, can pass NULL to
 * indicate that the batched lookup starts from the beginning of the map.
 * @param out_batch output parameter that should be passed to next call as *in_batch*
 * @param keys pointer to an array large enough for *count* keys
 * @param values pointer to an array large enough for *count* values
 * @param count input and output parameter; on input it's the number of elements
 * in the map to read in batch; on output it's the number of elements that were
 * successfully read.
 * If a non-**EFAULT** error is returned, **count** will be set to the number of
 * elements that were read before the error occurred.
 * If **EFAULT** is returned, **count** should not be trusted to be correct.
 * @param opts options for configuring the way the batch lookup works
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts);

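/* Usage sketch (illustrative only): read the whole map in chunks of up to 64
 * elements per syscall. For hash-type maps the in_batch/out_batch token is an
 * opaque cursor; using a __u32 for it here is an assumption that matches
 * common usage, not a contract stated by this header.
 *
 *	__u32 in_batch, out_batch, count;
 *	__u32 keys[64];
 *	__u64 values[64];
 *	bool first = true;
 *	LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *	int err;
 *
 *	while (true) {
 *		count = 64;
 *		err = bpf_map_lookup_batch(map_fd, first ? NULL : &in_batch,
 *					   &out_batch, keys, values, &count,
 *					   &opts);
 *		if (err && err != -ENOENT)
 *			break;			... hard error ...
 *		... process count entries from keys[] / values[] ...
 *		if (err == -ENOENT)
 *			break;			... reached end of map ...
 *		in_batch = out_batch;
 *		first = false;
 *	}
 */
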
/**
 * @brief **bpf_map_lookup_and_delete_batch()** allows for batch lookup and deletion
 * of BPF map elements where each element is deleted after being retrieved.
 *
 * @param fd BPF map file descriptor
 * @param in_batch address of the first element in batch to read, can pass NULL to
 * indicate that the batched lookup and deletion starts from the beginning of the map
 * @param out_batch output parameter that should be passed to next call as *in_batch*
 * @param keys pointer to an array of *count* keys
 * @param values pointer to an array large enough for *count* values
 * @param count input and output parameter; on input it's the number of elements
 * in the map to read and delete in batch; on output it represents the number of
 * elements that were successfully read and deleted.
 * If a non-**EFAULT** error code is returned and the output **count** value is
 * not equal to the input **count** value, up to **count** elements may have
 * been deleted.
 * If **EFAULT** is returned, up to *count* elements may have been deleted without
 * being returned via the *keys* and *values* output parameters.
 * @param opts options for configuring the way the batch lookup and delete works
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch,
					       void *out_batch, void *keys,
					       void *values, __u32 *count,
					       const struct bpf_map_batch_opts *opts);

/**
 * @brief **bpf_map_update_batch()** updates multiple elements in a map
 * by specifying keys and their corresponding values.
 *
 * The *keys* and *values* parameters must point to memory large enough
 * to hold *count* items based on the key and value size of the map.
 *
 * The *opts* parameter can be used to control how *bpf_map_update_batch()*
 * should handle keys that either do or do not already exist in the map.
 * In particular the *flags* parameter of *bpf_map_batch_opts* can be
 * one of the following:
 *
 * **BPF_ANY**
 *    Create new elements or update existing.
 *
 * **BPF_NOEXIST**
 *    Create new elements only if they do not exist.
 *
 * **BPF_EXIST**
 *    Update existing elements.
 *
 * **BPF_F_LOCK**
 *    Update spin_lock-ed map elements. This must be
 *    specified if the map value contains a spinlock.
 *
 * Note that *count* is an input and output parameter, where on output it
 * represents how many elements were successfully updated. Also note that if
 * **EFAULT** is returned, *count* should not be trusted to be correct.
 *
 * @param fd BPF map file descriptor
 * @param keys pointer to an array of *count* keys
 * @param values pointer to an array of *count* values
 * @param count input and output parameter; on input it's the number of elements
 * in the map to update in batch; on output, if a non-**EFAULT** error is
 * returned, **count** represents the number of elements that were updated,
 * which may be less than the input **count** value.
 * If **EFAULT** is returned, **count** should not be trusted to be correct.
 * @param opts options for configuring the way the batch update works
 * @return 0, on success; negative error code, otherwise (errno is also set to
 * the error code)
 */
LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values,
				    __u32 *count,
				    const struct bpf_map_batch_opts *opts);

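/* Usage sketch (illustrative only): insert or update a batch of keys/values
 * in one syscall with default (BPF_ANY) semantics. nr_elems, keys and values
 * are caller-provided and only assumed for the example.
 *
 *	LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *	__u32 count = nr_elems;
 *	int err;
 *
 *	err = bpf_map_update_batch(map_fd, keys, values, &count, &opts);
 *	if (err)
 *		... count holds how many elements were updated before the
 *		    failure (unless the error was -EFAULT) ...
 */
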
struct bpf_obj_get_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */

	__u32 file_flags;

	size_t :0;
};
#define bpf_obj_get_opts__last_field file_flags

LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_get(const char *pathname);
LIBBPF_API int bpf_obj_get_opts(const char *pathname,
				const struct bpf_obj_get_opts *opts);

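/* Usage sketch (illustrative only): pin a map FD into the BPF filesystem and
 * retrieve it again later. The pin path below assumes bpffs is mounted at
 * /sys/fs/bpf, which is the conventional but not guaranteed mount point.
 *
 *	if (bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map"))
 *		... handle error ...
 *
 *	... later, possibly in another process ...
 *	int pinned_fd = bpf_obj_get("/sys/fs/bpf/example_map");
 *	if (pinned_fd < 0)
 *		... handle error ...
 */
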
struct bpf_prog_attach_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	unsigned int flags;
	int replace_prog_fd;
};
#define bpf_prog_attach_opts__last_field replace_prog_fd

LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
			       enum bpf_attach_type type, unsigned int flags);
LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int attachable_fd,
				    enum bpf_attach_type type,
				    const struct bpf_prog_attach_opts *opts);
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
				enum bpf_attach_type type);

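/* Usage sketch (illustrative only): attach a cgroup-skb program to a cgroup
 * and detach it again. The cgroup path and attach type are assumptions made
 * for the example; prog_fd must refer to a program of a compatible type.
 * open() and O_RDONLY need <fcntl.h>, which this header does not pull in.
 *
 *	int cgroup_fd = open("/sys/fs/cgroup/my_group", O_RDONLY);
 *
 *	if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS,
 *			    BPF_F_ALLOW_MULTI))
 *		... handle error ...
 *	...
 *	if (bpf_prog_detach2(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS))
 *		... handle error ...
 */
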
union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */
struct bpf_link_create_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u32 flags;
	union bpf_iter_link_info *iter_info;
	__u32 iter_info_len;
	__u32 target_btf_id;
	union {
		struct {
			__u64 bpf_cookie;
		} perf_event;
		struct {
			__u32 flags;
			__u32 cnt;
			const char **syms;
			const unsigned long *addrs;
			const __u64 *cookies;
		} kprobe_multi;
		struct {
			__u64 cookie;
		} tracing;
	};
	size_t :0;
};
#define bpf_link_create_opts__last_field kprobe_multi.cookies

LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
			       enum bpf_attach_type attach_type,
			       const struct bpf_link_create_opts *opts);

LIBBPF_API int bpf_link_detach(int link_fd);

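/* Usage sketch (illustrative only): create a BPF link attaching a cgroup
 * program, then tear it down. Compared with bpf_prog_attach(), the attachment
 * lives only as long as the link FD (or its pin) does. The attach type and
 * cgroup_fd are assumptions for the example.
 *
 *	LIBBPF_OPTS(bpf_link_create_opts, opts);
 *	int link_fd;
 *
 *	link_fd = bpf_link_create(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
 *				  &opts);
 *	if (link_fd < 0)
 *		... handle error ...
 *	...
 *	bpf_link_detach(link_fd);
 *	close(link_fd);
 */
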
struct bpf_link_update_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u32 flags;	   /* extra flags */
	__u32 old_prog_fd; /* expected old program FD */
};
#define bpf_link_update_opts__last_field old_prog_fd

LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd,
			       const struct bpf_link_update_opts *opts);

LIBBPF_API int bpf_iter_create(int link_fd);

struct bpf_prog_test_run_attr {
	int prog_fd;
	int repeat;
	const void *data_in;
	__u32 data_size_in;
	void *data_out;      /* optional */
	__u32 data_size_out; /* in: max length of data_out
			      * out: length of data_out */
	__u32 retval;        /* out: return code of the BPF program */
	__u32 duration;      /* out: average per repetition in ns */
	const void *ctx_in;  /* optional */
	__u32 ctx_size_in;
	void *ctx_out;       /* optional */
	__u32 ctx_size_out;  /* in: max length of ctx_out
			      * out: length of ctx_out */
};

LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id);

struct bpf_get_fd_by_id_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u32 open_flags; /* permissions requested for the operation on fd */
	size_t :0;
};
#define bpf_get_fd_by_id_opts__last_field open_flags

LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id,
				const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_map_get_fd_by_id_opts(__u32 id,
				const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_btf_get_fd_by_id_opts(__u32 id,
				const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id,
				const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);

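/* Usage sketch (illustrative only): walk all BPF programs loaded on the
 * system and fetch basic info about each one. struct bpf_prog_info comes from
 * linux/bpf.h (included above); sufficient privileges (e.g. CAP_SYS_ADMIN)
 * are assumed.
 *
 *	__u32 id = 0;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		struct bpf_prog_info info = {};
 *		__u32 info_len = sizeof(info);
 *		int fd = bpf_prog_get_fd_by_id(id);
 *
 *		if (fd < 0)
 *			continue;		... may race with unload ...
 *		if (!bpf_obj_get_info_by_fd(fd, &info, &info_len))
 *			... use info.name, info.type, info.id, ... ...
 *		close(fd);
 *	}
 */
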
struct bpf_prog_query_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u32 query_flags;
	__u32 attach_flags; /* output argument */
	__u32 *prog_ids;
	__u32 prog_cnt; /* input+output argument */
	__u32 *prog_attach_flags;
};
#define bpf_prog_query_opts__last_field prog_attach_flags

LIBBPF_API int bpf_prog_query_opts(int target_fd,
				   enum bpf_attach_type type,
				   struct bpf_prog_query_opts *opts);
LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
			      __u32 query_flags, __u32 *attach_flags,
			      __u32 *prog_ids, __u32 *prog_cnt);

LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
				 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
				 __u64 *probe_offset, __u64 *probe_addr);

#ifdef __cplusplus
/* forward-declaring enums in C++ isn't compatible with pure C enums, so
 * instead define bpf_enable_stats() as accepting int as an input
 */
LIBBPF_API int bpf_enable_stats(int type);
#else
enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
#endif

struct bpf_prog_bind_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u32 flags;
};
#define bpf_prog_bind_opts__last_field flags

LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
				 const struct bpf_prog_bind_opts *opts);

struct bpf_test_run_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	const void *data_in; /* optional */
	void *data_out;      /* optional */
	__u32 data_size_in;
	__u32 data_size_out; /* in: max length of data_out
			      * out: length of data_out
			      */
	const void *ctx_in; /* optional */
	void *ctx_out;      /* optional */
	__u32 ctx_size_in;
	__u32 ctx_size_out; /* in: max length of ctx_out
			     * out: length of ctx_out
			     */
	__u32 retval;        /* out: return code of the BPF program */
	int repeat;
	__u32 duration;      /* out: average per repetition in ns */
	__u32 flags;
	__u32 cpu;
	__u32 batch_size;
};
#define bpf_test_run_opts__last_field batch_size

LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
				      struct bpf_test_run_opts *opts);

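/* Usage sketch (illustrative only): run a network program once against a
 * dummy packet buffer and check its return value. The 64-byte packet and the
 * assumption that prog_fd is a packet-processing program type are arbitrary
 * choices for the example.
 *
 *	char pkt[64] = {};
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),
 *		.repeat = 1,
 *	);
 *
 *	if (bpf_prog_test_run_opts(prog_fd, &topts))
 *		... handle error ...
 *	... topts.retval holds the program's return code,
 *	    topts.duration the average runtime per repetition in ns ...
 */
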
#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_BPF_H */