#ifndef __BPF_EXPERIMENTAL__
#define __BPF_EXPERIMENTAL__

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))

/* Description
 *	Allocates an object of the type represented by 'local_type_id' in
 *	program BTF. User may use the bpf_core_type_id_local macro to pass the
 *	type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it.
 * Returns
 *	A pointer to an object of the type corresponding to the passed in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_new_impl */
#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it.
 * Returns
 *	Void.
 */
extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_drop_impl */
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
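
/* Example: allocate a local kptr and free it again. A minimal sketch using a
 * hypothetical 'struct node_data'; an allocated object must be stored in a
 * collection or map, or dropped, before the program exits:
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_list_node node;
 *	};
 *
 *	SEC("tc")
 *	int alloc_example(struct __sk_buff *ctx)
 *	{
 *		struct node_data *n;
 *
 *		n = bpf_obj_new(typeof(*n));
 *		if (!n)
 *			return 0;
 *		n->key = 42;
 *		bpf_obj_drop(n);
 *		return 0;
 *	}
 */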

/* Description
 *	Increment the refcount on a refcounted local kptr, turning the
 *	non-owning reference input into an owning reference in the process.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it.
 * Returns
 *	An owning reference to the object pointed to by 'kptr'.
 */
extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_refcount_acquire_impl */
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
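
/* Example: share one refcounted object between two lists. A minimal sketch;
 * 'lock_a'/'head_a' and 'lock_b'/'head_b' are hypothetical globals declared
 * like in the linked list example further down, with __contains() tags for
 * 'node_a' and 'node_b' respectively. bpf_refcount_acquire() yields a second
 * owning reference, and each push consumes one of them:
 *
 *	struct shared_node {
 *		long data;
 *		struct bpf_refcount ref;
 *		struct bpf_list_node node_a;
 *		struct bpf_list_node node_b;
 *	};
 *
 *	struct shared_node *n, *m;
 *
 *	n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	m = bpf_refcount_acquire(n);
 *
 *	bpf_spin_lock(&lock_a);
 *	bpf_list_push_front(&head_a, &n->node_a);
 *	bpf_spin_unlock(&lock_a);
 *
 *	bpf_spin_lock(&lock_b);
 *	bpf_list_push_front(&head_b, &m->node_b);
 *	bpf_spin_unlock(&lock_b);
 */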

/* Description
 *	Add a new entry to the beginning of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_front_impl(struct bpf_list_head *head,
				    struct bpf_list_node *node,
				    void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_front_impl */
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

/* Description
 *	Add a new entry to the end of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_back_impl(struct bpf_list_head *head,
				   struct bpf_list_node *node,
				   void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_back_impl */
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

/* Description
 *	Remove the entry at the beginning of the BPF linked list.
 * Returns
 *	Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;

/* Description
 *	Remove the entry at the end of the BPF linked list.
 * Returns
 *	Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
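
/* Example: a global BPF linked list protected by a spin lock. A minimal,
 * self-contained sketch using a hypothetical 'struct node_data'; the
 * private() and container_of() helpers mirror the BPF selftests. The list
 * head carries a __contains() tag naming the node type and member, all
 * pushes/pops run under the associated lock, pushing turns 'n' into a
 * non-owning reference, and a popped entry is owned again and must be
 * dropped (or stored) after the critical section:
 *
 *	#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
 *	#define container_of(ptr, type, member) \
 *		((type *)((void *)(ptr) - __builtin_offsetof(type, member)))
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_list_node node;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_list_head ghead __contains(node_data, node);
 *
 *	SEC("tc")
 *	int list_example(struct __sk_buff *ctx)
 *	{
 *		struct bpf_list_node *ln;
 *		struct node_data *n;
 *
 *		n = bpf_obj_new(typeof(*n));
 *		if (!n)
 *			return 0;
 *		n->key = 42;
 *
 *		bpf_spin_lock(&glock);
 *		bpf_list_push_front(&ghead, &n->node);
 *		ln = bpf_list_pop_front(&ghead);
 *		bpf_spin_unlock(&glock);
 *
 *		if (ln)
 *			bpf_obj_drop(container_of(ln, struct node_data, node));
 *		return 0;
 *	}
 */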

/* Description
 *	Remove 'node' from rbtree with root 'root'
 * Returns
 *	Pointer to the removed node, or NULL if 'root' didn't contain 'node'
 */
extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
					     struct bpf_rb_node *node) __ksym;

/* Description
 *	Add 'node' to rbtree with root 'root' using comparator 'less'
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a tree
 */
extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			       bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			       void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_rbtree_add_impl */
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

/* Description
 *	Return the first (leftmost) node in the input tree
 * Returns
 *	Pointer to the node, which is _not_ removed from the tree. If the tree
 *	contains no nodes, returns NULL.
 */
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
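
/* Example: a global rbtree ordered by a 'less' callback, reusing the
 * private() and container_of() helpers from the list example above. A
 * minimal sketch with a hypothetical 'struct tree_node'; bpf_rbtree_remove()
 * returns an owning reference that must be dropped (or stored) once the lock
 * is released:
 *
 *	struct tree_node {
 *		long key;
 *		struct bpf_rb_node rbn;
 *	};
 *
 *	private(T) struct bpf_spin_lock tlock;
 *	private(T) struct bpf_rb_root troot __contains(tree_node, rbn);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct tree_node *na = container_of(a, struct tree_node, rbn);
 *		struct tree_node *nb = container_of(b, struct tree_node, rbn);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	SEC("tc")
 *	int rbtree_example(struct __sk_buff *ctx)
 *	{
 *		struct bpf_rb_node *rbn;
 *		struct tree_node *n;
 *
 *		n = bpf_obj_new(typeof(*n));
 *		if (!n)
 *			return 0;
 *		n->key = 123;
 *
 *		bpf_spin_lock(&tlock);
 *		bpf_rbtree_add(&troot, &n->rbn, less);
 *		rbn = bpf_rbtree_first(&troot);
 *		if (rbn)
 *			rbn = bpf_rbtree_remove(&troot, rbn);
 *		bpf_spin_unlock(&tlock);
 *
 *		if (rbn)
 *			bpf_obj_drop(container_of(rbn, struct tree_node, rbn));
 *		return 0;
 *	}
 */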

/* Description
 *	Allocates a percpu object of the type represented by 'local_type_id' in
 *	program BTF. User may use the bpf_core_type_id_local macro to pass the
 *	type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it.
 * Returns
 *	A pointer to a percpu object of the type corresponding to the passed in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated percpu object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it.
 * Returns
 *	Void.
 */
extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_drop_impl */
#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)
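
/* Example: allocate and free a percpu object. A minimal sketch assuming a
 * hypothetical 'struct pcpu_val'; in real use the pointer would typically be
 * stashed in a map's '__percpu_kptr' field with bpf_kptr_xchg(), and the
 * per-CPU slots read via bpf_per_cpu_ptr() or bpf_this_cpu_ptr():
 *
 *	struct pcpu_val {
 *		long counter;
 *	};
 *
 *	struct pcpu_val __percpu_kptr *p;
 *
 *	p = bpf_percpu_obj_new(struct pcpu_val);
 *	if (!p)
 *		return 0;
 *	bpf_percpu_obj_drop(p);
 */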

struct bpf_iter_task_vma;

extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
				 struct task_struct *task,
				 unsigned long addr) __ksym;
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
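
/* Example: walk a task's VMAs with the open-coded iterator, starting at
 * address 0. A minimal sketch; 'task' is assumed to be a valid, trusted
 * task_struct pointer (e.g. from bpf_get_current_task_btf()), and the
 * iterator must always be destroyed after use:
 *
 *	struct bpf_iter_task_vma vma_it;
 *	struct vm_area_struct *vma;
 *
 *	bpf_iter_task_vma_new(&vma_it, task, 0);
 *	while ((vma = bpf_iter_task_vma_next(&vma_it)))
 *		bpf_printk("vma %lx-%lx", vma->vm_start, vma->vm_end);
 *	bpf_iter_task_vma_destroy(&vma_it);
 */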

/* Description
 *	Throw a BPF exception from the program, immediately terminating its
 *	execution and unwinding the stack. The supplied 'cookie' parameter
 *	will be the return value of the program when an exception is thrown,
 *	and the default exception callback is used. Otherwise, if an exception
 *	callback is set using the '__exception_cb(callback)' declaration tag
 *	on the main program, the 'cookie' parameter will be the callback's only
 *	input argument.
 *
 *	Thus, in the case of the default exception callback, 'cookie' is
 *	subjected to constraints on the program's return value (as with R0 on
 *	exit). Otherwise, the return value of the marked exception callback
 *	will be subjected to the same checks.
 *
 *	Note that throwing an exception with lingering resources (locks,
 *	references, etc.) will lead to a verification error.
 *
 *	Note that callbacks *cannot* call this helper.
 * Returns
 *	Never.
 * Throws
 *	An exception with the specified 'cookie' value.
 */
extern void bpf_throw(u64 cookie) __ksym;

/* This macro must be used to mark the exception callback corresponding to the
 * main program. For example:
 *
 * int exception_cb(u64 cookie) {
 *	return cookie;
 * }
 *
 * SEC("tc")
 * __exception_cb(exception_cb)
 * int main_prog(struct __sk_buff *ctx) {
 *	...
 *	return TC_ACT_OK;
 * }
 *
 * Here, the exception callback for the main program will be 'exception_cb'.
 * Note that this attribute can only be used once, and multiple exception
 * callbacks specified for the main program will lead to a verification error.
 */
#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))

#define __bpf_assert_signed(x) _Generic((x),	\
	unsigned long: 0,			\
	unsigned long long: 0,			\
	signed long: 1,				\
	signed long long: 1			\
)

#define __bpf_assert_check(LHS, op, RHS)						\
	_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression");	\
	_Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n");	\
	_Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert"); \
	_Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")

#define __bpf_assert(LHS, op, cons, RHS, VAL)						\
	({										\
		(void)bpf_throw;							\
		asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw" \
			      : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : ); \
	})

#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign)	\
	({								\
		__bpf_assert_check(LHS, op, RHS);			\
		if (__bpf_assert_signed(LHS) && !(supp_sign))		\
			__bpf_assert(LHS, "s" #op, cons, RHS, VAL);	\
		else							\
			__bpf_assert(LHS, #op, cons, RHS, VAL);		\
	 })

#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign)				\
	({									\
		if (sizeof(typeof(RHS)) == 8) {					\
			const typeof(RHS) rhs_var = (RHS);			\
			__bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign); \
		} else {							\
			__bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign); \
		}								\
	 })

#define __cmp_cannot_be_signed(x) \
	__builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \
	__builtin_strcmp(#x, "&") == 0

#define __is_signed_type(type) (((type)(-1)) < (type)1)

#define __bpf_cmp(LHS, OP, SIGN, PRED, RHS, DEFAULT)				\
	({									\
		__label__ l_true;						\
		bool ret = DEFAULT;						\
		asm volatile goto("if %[lhs] " SIGN #OP " %[rhs] goto %l[l_true]" \
				  :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \
		ret = !DEFAULT;							\
l_true:										\
		ret;								\
	})

/* C type conversions coupled with a comparison operator are tricky.
 * Make sure the BPF program is compiled with -Wsign-compare; then
 * '__lhs OP __rhs' below will catch the mistake.
 * Be aware that we check only __lhs to figure out the sign of the compare.
 */
#define _bpf_cmp(LHS, OP, RHS, NOFLIP)						\
	({									\
		typeof(LHS) __lhs = (LHS);					\
		typeof(RHS) __rhs = (RHS);					\
		bool ret;							\
		_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
		(void)(__lhs OP __rhs);						\
		if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) { \
			if (sizeof(__rhs) == 8)					\
				ret = __bpf_cmp(__lhs, OP, "", "r", __rhs, NOFLIP); \
			else							\
				ret = __bpf_cmp(__lhs, OP, "", "i", __rhs, NOFLIP); \
		} else {							\
			if (sizeof(__rhs) == 8)					\
				ret = __bpf_cmp(__lhs, OP, "s", "r", __rhs, NOFLIP); \
			else							\
				ret = __bpf_cmp(__lhs, OP, "s", "i", __rhs, NOFLIP); \
		}								\
		ret;								\
	})

#ifndef bpf_cmp_unlikely
#define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true)
#endif

#ifndef bpf_cmp_likely
#define bpf_cmp_likely(LHS, OP, RHS)				\
	({							\
		bool ret = 0;					\
		if (__builtin_strcmp(#OP, "==") == 0)		\
			ret = _bpf_cmp(LHS, !=, RHS, false);	\
		else if (__builtin_strcmp(#OP, "!=") == 0)	\
			ret = _bpf_cmp(LHS, ==, RHS, false);	\
		else if (__builtin_strcmp(#OP, "<=") == 0)	\
			ret = _bpf_cmp(LHS, >, RHS, false);	\
		else if (__builtin_strcmp(#OP, "<") == 0)	\
			ret = _bpf_cmp(LHS, >=, RHS, false);	\
		else if (__builtin_strcmp(#OP, ">") == 0)	\
			ret = _bpf_cmp(LHS, <=, RHS, false);	\
		else if (__builtin_strcmp(#OP, ">=") == 0)	\
			ret = _bpf_cmp(LHS, <, RHS, false);	\
		else						\
			(void) "bug";				\
		ret;						\
	})
#endif
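
/* Example: use the bpf_cmp macros to emit a comparison as a single BPF
 * conditional jump, preventing compiler transformations that can lose the
 * verifier's bounds tracking. A minimal sketch; LHS must be an lvalue, and
 * the likely/unlikely variants choose which outcome becomes the fall-through
 * path:
 *
 *	SEC("tc")
 *	int cmp_example(struct __sk_buff *skb)
 *	{
 *		int len = skb->len;
 *
 *		if (bpf_cmp_unlikely(len, >, 1024))
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}
 */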

#ifndef bpf_nop_mov
#define bpf_nop_mov(var) \
	asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))
#endif

/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert(cond) if (!(cond)) bpf_throw(0);

/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);
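
/* Example: enforce invariants and unwind on failure. A minimal sketch; if an
 * assertion fails, the program terminates through the default exception
 * callback, returning the cookie (0 for bpf_assert(), 10 for the
 * bpf_assert_with() call below):
 *
 *	SEC("tc")
 *	int assert_example(struct __sk_buff *ctx)
 *	{
 *		u64 len = ctx->len;
 *
 *		bpf_assert(len < 0x10000);
 *		bpf_assert_with(len > 0, 10);
 *		return TC_ACT_OK;
 *	}
 */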

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_range(LHS, BEG, END)					\
	({								\
		_Static_assert(BEG <= END, "BEG must be <= END");	\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, BEG, 0, false);		\
		__bpf_assert_op(LHS, <=, END, 0, false);		\
	})

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_range_with(LHS, BEG, END, value)			\
	({								\
		_Static_assert(BEG <= END, "BEG must be <= END");	\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, BEG, value, false);		\
		__bpf_assert_op(LHS, <=, END, value, false);		\
	})
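
/* Example: bound an index before an array access so the verifier knows its
 * range. A minimal sketch with a hypothetical global 'vals' array; note that
 * LHS must be an 8-byte integer lvalue:
 *
 *	u64 vals[16];
 *
 *	SEC("tc")
 *	int range_example(struct __sk_buff *ctx)
 *	{
 *		u64 idx = ctx->len;
 *
 *		bpf_assert_range(idx, 0, 15);
 *		vals[idx] += 1;
 *		return TC_ACT_OK;
 *	}
 */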

struct bpf_iter_css_task;
struct cgroup_subsys_state;
extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
				 struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;

struct bpf_iter_task;
extern int bpf_iter_task_new(struct bpf_iter_task *it,
			     struct task_struct *task, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;

struct bpf_iter_css;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
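
/* Example: count the threads of the current process with the open-coded task
 * iterator. A minimal sketch for a non-sleepable tracing program (sleepable
 * programs must wrap the walk in bpf_rcu_read_lock()/bpf_rcu_read_unlock());
 * the BPF_TASK_ITER_PROC_THREADS flag is assumed to be visible via vmlinux.h,
 * and since these kfuncs are declared __weak, availability should be checked
 * on kernels that may lack them:
 *
 *	struct task_struct *cur = bpf_get_current_task_btf();
 *	struct bpf_iter_task task_it;
 *	struct task_struct *t;
 *	int nr = 0;
 *
 *	bpf_iter_task_new(&task_it, cur, BPF_TASK_ITER_PROC_THREADS);
 *	while ((t = bpf_iter_task_next(&task_it)))
 *		nr++;
 *	bpf_iter_task_destroy(&task_it);
 *	bpf_printk("%d threads", nr);
 */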

#endif