v6.13.7
#ifndef __BPF_EXPERIMENTAL__
#define __BPF_EXPERIMENTAL__

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))

/* Description
 *	Allocates an object of the type represented by 'local_type_id' in
 *	program BTF. Users may use the bpf_core_type_id_local macro to pass
 *	the type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	A pointer to an object of the type corresponding to the passed-in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_new_impl */
#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	Void.
 */
extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_drop_impl */
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

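/* Example usage (illustrative sketch; 'struct node_data' is a hypothetical
 * program-local type):
 *
 * struct node_data {
 *	long key;
 *	long data;
 * };
 *
 * SEC("tc")
 * int alloc_and_free(struct __sk_buff *ctx) {
 *	struct node_data *n = bpf_obj_new(struct node_data);
 *
 *	if (!n)
 *		return 0;
 *	n->key = 5;
 *	bpf_obj_drop(n);
 *	return 0;
 * }
 */
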
/* Description
 *	Increment the refcount on a refcounted local kptr, turning the
 *	non-owning reference input into an owning reference in the process.
 *
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	An owning reference to the object pointed to by 'kptr'.
 */
extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_refcount_acquire_impl */
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

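/* Example usage (sketch): a local type becomes refcounted by embedding a
 * 'struct bpf_refcount' field. Acquiring an extra reference lets the same
 * object live in two collections; every owning reference must eventually be
 * inserted somewhere or released with bpf_obj_drop().
 *
 * struct node_data {
 *	long key;
 *	struct bpf_refcount ref;
 *	struct bpf_rb_node r;
 *	struct bpf_list_node l;
 * };
 *
 * struct node_data *n = bpf_obj_new(struct node_data);
 * struct node_data *m;
 *
 * if (!n)
 *	return 0;
 * m = bpf_refcount_acquire(n);
 * ... 'n' and 'm' are now independent owning references to one object ...
 */
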
/* Description
 *	Add a new entry to the beginning of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier; no
 *	need for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_front_impl(struct bpf_list_head *head,
				    struct bpf_list_node *node,
				    void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_front_impl */
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

/* Description
 *	Add a new entry to the end of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier; no
 *	need for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_back_impl(struct bpf_list_head *head,
				   struct bpf_list_node *node,
				   void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_back_impl */
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

/* Description
 *	Remove the entry at the beginning of the BPF linked list.
 * Returns
 *	Pointer to the bpf_list_node of the removed entry, or NULL if the
 *	list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;

/* Description
 *	Remove the entry at the end of the BPF linked list.
 * Returns
 *	Pointer to the bpf_list_node of the removed entry, or NULL if the
 *	list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;

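/* Example usage (sketch; the global head and its spin lock are placed in the
 * same data section so the verifier can associate them):
 *
 * struct elem {
 *	long value;
 *	struct bpf_list_node node;
 * };
 *
 * struct bpf_spin_lock glock SEC(".data.list");
 * struct bpf_list_head ghead __contains(elem, node) SEC(".data.list");
 *
 * SEC("tc")
 * int list_example(struct __sk_buff *ctx) {
 *	struct elem *e = bpf_obj_new(struct elem);
 *	struct bpf_list_node *n;
 *
 *	if (!e)
 *		return 0;
 *	bpf_spin_lock(&glock);
 *	bpf_list_push_front(&ghead, &e->node);
 *	n = bpf_list_pop_back(&ghead);
 *	bpf_spin_unlock(&glock);
 *	if (n)
 *		bpf_obj_drop(container_of(n, struct elem, node));
 *	return 0;
 * }
 */
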
/* Description
 *	Remove 'node' from the rbtree with root 'root'.
 * Returns
 *	Pointer to the removed node, or NULL if 'root' didn't contain 'node'.
 */
extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
					     struct bpf_rb_node *node) __ksym;

/* Description
 *	Add 'node' to the rbtree with root 'root' using comparator 'less'.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier; no
 *	need for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a tree
 */
extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			       bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			       void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_rbtree_add_impl */
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

/* Description
 *	Return the first (leftmost) node in the input tree.
 * Returns
 *	Pointer to the node, which is _not_ removed from the tree. If the tree
 *	contains no nodes, returns NULL.
 */
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

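/* Example usage (sketch): nodes are ordered by a user-supplied 'less'
 * callback, and all tree operations must run under the associated spin lock.
 *
 * struct node_data {
 *	long key;
 *	struct bpf_rb_node node;
 * };
 *
 * struct bpf_spin_lock glock SEC(".data.rbtree");
 * struct bpf_rb_root groot __contains(node_data, node) SEC(".data.rbtree");
 *
 * static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) {
 *	struct node_data *node_a = container_of(a, struct node_data, node);
 *	struct node_data *node_b = container_of(b, struct node_data, node);
 *
 *	return node_a->key < node_b->key;
 * }
 *
 * ...
 * bpf_spin_lock(&glock);
 * bpf_rbtree_add(&groot, &n->node, less);
 * res = bpf_rbtree_first(&groot);
 * bpf_spin_unlock(&glock);
 */
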
/* Description
 *	Allocates a percpu object of the type represented by 'local_type_id'
 *	in program BTF. Users may use the bpf_core_type_id_local macro to
 *	pass the type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	A pointer to a percpu object of the type corresponding to the
 *	passed-in 'local_type_id', or NULL on failure.
 */
extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated percpu object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	Void.
 */
extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;

struct bpf_iter_task_vma;

extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
				 struct task_struct *task,
				 __u64 addr) __ksym;
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;

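/* Example usage (sketch): open-coded iterators follow a new()/next()/
 * destroy() pattern; the bpf_for_each() macro from bpf_helpers.h wraps this
 * same sequence.
 *
 * struct bpf_iter_task_vma vma_it;
 * struct vm_area_struct *vma;
 *
 * bpf_iter_task_vma_new(&vma_it, task, 0);
 * while ((vma = bpf_iter_task_vma_next(&vma_it)) != NULL)
 *	... inspect vma->vm_start, vma->vm_end ...;
 * bpf_iter_task_vma_destroy(&vma_it);
 */
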
/* Convenience macro to wrap over bpf_percpu_obj_drop_impl */
#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)

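/* Example usage (sketch): a percpu object is typically stashed into a map
 * value with bpf_kptr_xchg(); each CPU's slot can then be reached with
 * bpf_per_cpu_ptr() or bpf_this_cpu_ptr(). 'struct val_t' and the map value
 * layout here are hypothetical, and 'v' points to a looked-up map value.
 *
 * struct val_t { long cnt; };
 * struct map_value { struct val_t __percpu_kptr *pc; };
 *
 * struct val_t __percpu_kptr *p = bpf_percpu_obj_new(struct val_t);
 *
 * if (!p)
 *	return 0;
 * p = bpf_kptr_xchg(&v->pc, p);
 * if (p)
 *	bpf_percpu_obj_drop(p);	... slot was already occupied ...
 */
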
/* Description
 *	Throw a BPF exception from the program, immediately terminating its
 *	execution and unwinding the stack. The supplied 'cookie' parameter
 *	will be the return value of the program when an exception is thrown
 *	and the default exception callback is used. Otherwise, if an exception
 *	callback is set using the '__exception_cb(callback)' declaration tag
 *	on the main program, the 'cookie' parameter will be the callback's only
 *	input argument.
 *
 *	Thus, in the case of the default exception callback, 'cookie' is
 *	subjected to the constraints on the program's return value (as with
 *	R0 on exit). Otherwise, the return value of the marked exception
 *	callback will be subjected to the same checks.
 *
 *	Note that throwing an exception with lingering resources (locks,
 *	references, etc.) will lead to a verification error.
 *
 *	Note that callbacks *cannot* call this helper.
 * Returns
 *	Never.
 * Throws
 *	An exception with the specified 'cookie' value.
 */
extern void bpf_throw(u64 cookie) __ksym;

/* Description
 *	Acquire a reference on the exe_file member field belonging to the
 *	mm_struct that is nested within the supplied task_struct. The supplied
 *	task_struct must be trusted/referenced.
 * Returns
 *	A referenced file pointer pointing to the exe_file member field of the
 *	mm_struct nested in the supplied task_struct, or NULL.
 */
extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;

/* Description
 *	Release a reference on the supplied file. The supplied file must have
 *	been acquired.
 */
extern void bpf_put_file(struct file *file) __ksym;

/* Description
 *	Resolve a pathname for the supplied path and store it in the supplied
 *	buffer. The supplied path must be trusted/referenced.
 * Returns
 *	A positive integer corresponding to the length of the resolved
 *	pathname, including the NUL termination character, stored in the
 *	supplied buffer. On error, a negative integer is returned.
 */
extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym;

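/* Example usage (sketch): resolve the path of the current task's
 * executable. The acquired file must always be released with bpf_put_file().
 *
 * struct task_struct *task = bpf_get_current_task_btf();
 * struct file *exe;
 * char buf[256];
 * int len;
 *
 * exe = bpf_get_task_exe_file(task);
 * if (!exe)
 *	return 0;
 * len = bpf_path_d_path(&exe->f_path, buf, sizeof(buf));
 * bpf_put_file(exe);
 */
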
/* This macro must be used to mark the exception callback corresponding to
 * the main program. For example:
 *
 * int exception_cb(u64 cookie) {
 *	return cookie;
 * }
 *
 * SEC("tc")
 * __exception_cb(exception_cb)
 * int main_prog(struct __sk_buff *ctx) {
 *	...
 *	return TC_ACT_OK;
 * }
 *
 * Here, the exception callback for the main program will be 'exception_cb'.
 * Note that this attribute can only be used once, and multiple exception
 * callbacks specified for the main program will lead to a verification
 * error.
 */
#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))

#define __bpf_assert_signed(x) _Generic((x), \
    unsigned long: 0,       \
    unsigned long long: 0,  \
    signed long: 1,         \
    signed long long: 1     \
)

#define __bpf_assert_check(LHS, op, RHS)								 \
	_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression");			 \
	_Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n");			 \
	_Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert");	 \
	_Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")

#define __bpf_assert(LHS, op, cons, RHS, VAL)							\
	({											\
		(void)bpf_throw;								\
		asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw"	\
			       : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : );	\
	})

#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign)			\
	({										\
		__bpf_assert_check(LHS, op, RHS);					\
		if (__bpf_assert_signed(LHS) && !(supp_sign))				\
			__bpf_assert(LHS, "s" #op, cons, RHS, VAL);			\
		else									\
			__bpf_assert(LHS, #op, cons, RHS, VAL);				\
	 })

#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign)					\
	({										\
		if (sizeof(typeof(RHS)) == 8) {						\
			const typeof(RHS) rhs_var = (RHS);				\
			__bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign);	\
		} else {								\
			__bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign);	\
		}									\
	 })

#define __cmp_cannot_be_signed(x) \
	__builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \
	__builtin_strcmp(#x, "&") == 0

#define __is_signed_type(type) (((type)(-1)) < (type)1)

#define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT)						\
	({											\
		__label__ l_true;								\
		bool ret = DEFAULT;								\
		asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]"		\
				  :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true);	\
		ret = !DEFAULT;									\
l_true:												\
		ret;										\
       })

/* C type conversions coupled with a comparison operator are tricky.
 * Make sure the BPF program is compiled with -Wsign-compare; then
 * __lhs OP __rhs below will catch the mistake.
 * Be aware that we check only __lhs to figure out the sign of the compare.
 */
#define _bpf_cmp(LHS, OP, RHS, UNLIKELY)								\
	({											\
		typeof(LHS) __lhs = (LHS);							\
		typeof(RHS) __rhs = (RHS);							\
		bool ret;									\
		_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression");	\
		(void)(__lhs OP __rhs);								\
		if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) {		\
			if (sizeof(__rhs) == 8)							\
				/* "i" would truncate a 64-bit constant into s32,		\
				 * so we have to use an extra register via "r".			\
				 */								\
				ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY);		\
			else									\
				ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY);		\
		} else {									\
			if (sizeof(__rhs) == 8)							\
				ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY);		\
			else									\
				ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY);		\
		}										\
		ret;										\
       })

#ifndef bpf_cmp_unlikely
#define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true)
#endif

#ifndef bpf_cmp_likely
#define bpf_cmp_likely(LHS, OP, RHS)								\
	({											\
		bool ret = 0;									\
		if (__builtin_strcmp(#OP, "==") == 0)						\
			ret = _bpf_cmp(LHS, !=, RHS, false);					\
		else if (__builtin_strcmp(#OP, "!=") == 0)					\
			ret = _bpf_cmp(LHS, ==, RHS, false);					\
		else if (__builtin_strcmp(#OP, "<=") == 0)					\
			ret = _bpf_cmp(LHS, >, RHS, false);					\
		else if (__builtin_strcmp(#OP, "<") == 0)					\
			ret = _bpf_cmp(LHS, >=, RHS, false);					\
		else if (__builtin_strcmp(#OP, ">") == 0)					\
			ret = _bpf_cmp(LHS, <=, RHS, false);					\
		else if (__builtin_strcmp(#OP, ">=") == 0)					\
			ret = _bpf_cmp(LHS, <, RHS, false);					\
		else										\
			asm volatile("r0 " #OP " invalid compare");				\
		ret;										\
       })
#endif

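/* Example usage (sketch): bpf_cmp_* emit exactly one conditional jump on the
 * given operands, which keeps the compiler from transforming the comparison
 * into a form the verifier tracks poorly.
 *
 * if (bpf_cmp_unlikely(len, >, MAX_LEN))
 *	return 0;
 * while (bpf_cmp_likely(i, <, n)) {
 *	...
 *	i++;
 * }
 */
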
/*
 * Note that cond_break can only be portably used in the body of a breakable
 * construct, whereas can_loop can be used anywhere.
 */
#ifdef __BPF_FEATURE_MAY_GOTO
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("may_goto %l[l_break]"	\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define cond_break					\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("may_goto %l[l_break]"	\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: break;					\
	l_continue:;					\
	})
#else
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define cond_break					\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: break;					\
	l_continue:;					\
	})
#else
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define cond_break					\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: break;					\
	l_continue:;					\
	})
#endif
#endif

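/* Example usage (sketch): both macros emit a may_goto-style instruction that
 * lets the verifier bound an otherwise open-ended loop.
 *
 * while (can_loop) {
 *	... runs until the verifier-managed iteration budget is spent ...
 * }
 *
 * for (i = 0; i < n; i++) {
 *	cond_break;	... must appear inside a breakable construct ...
 *	...
 * }
 */
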
#ifndef bpf_nop_mov
#define bpf_nop_mov(var) \
	asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))
#endif

/* emit instruction:
 * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
 */
#ifndef bpf_addr_space_cast
#define bpf_addr_space_cast(var, dst_as, src_as)\
	asm volatile(".byte 0xBF;		\
		     .ifc %[reg], r0;		\
		     .byte 0x00;		\
		     .endif;			\
		     .ifc %[reg], r1;		\
		     .byte 0x11;		\
		     .endif;			\
		     .ifc %[reg], r2;		\
		     .byte 0x22;		\
		     .endif;			\
		     .ifc %[reg], r3;		\
		     .byte 0x33;		\
		     .endif;			\
		     .ifc %[reg], r4;		\
		     .byte 0x44;		\
		     .endif;			\
		     .ifc %[reg], r5;		\
		     .byte 0x55;		\
		     .endif;			\
		     .ifc %[reg], r6;		\
		     .byte 0x66;		\
		     .endif;			\
		     .ifc %[reg], r7;		\
		     .byte 0x77;		\
		     .endif;			\
		     .ifc %[reg], r8;		\
		     .byte 0x88;		\
		     .endif;			\
		     .ifc %[reg], r9;		\
		     .byte 0x99;		\
		     .endif;			\
		     .short %[off];		\
		     .long %[as]"		\
		     : [reg]"+r"(var)		\
		     : [off]"i"(BPF_ADDR_SPACE_CAST) \
		     , [as]"i"((dst_as << 16) | src_as));
#endif

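/* Example usage (sketch): arena programs use this to convert between the
 * kernel and user views of an arena pointer, e.g. via wrappers like the
 * cast_kern()/cast_user() macros found in the selftests:
 *
 * #define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
 * #define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
 */
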
void bpf_preempt_disable(void) __weak __ksym;
void bpf_preempt_enable(void) __weak __ksym;

typedef struct {
} __bpf_preempt_t;

static inline __bpf_preempt_t __bpf_preempt_constructor(void)
{
	__bpf_preempt_t ret = {};

	bpf_preempt_disable();
	return ret;
}
static inline void __bpf_preempt_destructor(__bpf_preempt_t *t)
{
	bpf_preempt_enable();
}
#define bpf_guard_preempt() \
	__bpf_preempt_t ___bpf_apply(preempt, __COUNTER__)			\
	__attribute__((__unused__, __cleanup__(__bpf_preempt_destructor))) =	\
	__bpf_preempt_constructor()

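/* Example usage (sketch): preemption is disabled for the remainder of the
 * enclosing scope; the cleanup attribute re-enables it on every exit path.
 *
 * int prog(void *ctx) {
 *	bpf_guard_preempt();
 *	... preemption disabled until the scope is left ...
 *	return 0;
 * }
 */
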
/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert(cond) if (!(cond)) bpf_throw(0);

/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);

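/* Example usage (sketch; subject to the exception rules described for
 * bpf_throw above):
 *
 * bpf_assert(idx < 64);		... throws cookie 0 on failure ...
 * bpf_assert_with(ptr != NULL, 1);	... throws cookie 1 on failure ...
 */
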
/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_range(LHS, BEG, END)					\
	({								\
		_Static_assert(BEG <= END, "BEG must be <= END");	\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, BEG, 0, false);		\
		__bpf_assert_op(LHS, <=, END, 0, false);		\
	})

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_range_with(LHS, BEG, END, value)			\
	({								\
		_Static_assert(BEG <= END, "BEG must be <= END");	\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, BEG, value, false);		\
		__bpf_assert_op(LHS, <=, END, value, false);		\
	})

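/* Example usage (sketch): after the assertion the verifier knows the 8-byte
 * variable 'idx' lies in [0, 15], so it can be used as an array index
 * without further checks.
 *
 * s64 idx = ...;
 *
 * bpf_assert_range(idx, 0, 15);
 * val = arr[idx];
 */
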
struct bpf_iter_css_task;
struct cgroup_subsys_state;
extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
		struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;

struct bpf_iter_task;
extern int bpf_iter_task_new(struct bpf_iter_task *it,
		struct task_struct *task, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;

struct bpf_iter_css;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
				struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
		int (callback_fn)(void *map, int *key, void *value),
		unsigned int flags__k, void *aux__ign) __ksym;
#define bpf_wq_set_callback(timer, cb, flags) \
	bpf_wq_set_callback_impl(timer, cb, flags, NULL)

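/* Example usage (sketch): a bpf_wq is embedded in a map value, and 'p__map'
 * must be the map that contains it. 'array' and 'struct elem' are
 * hypothetical.
 *
 * struct elem { struct bpf_wq w; };
 *
 * static int wq_cb(void *map, int *key, void *value) { return 0; }
 *
 * int key = 0;
 * struct elem *e = bpf_map_lookup_elem(&array, &key);
 *
 * if (!e)
 *	return 0;
 * if (bpf_wq_init(&e->w, &array, 0))
 *	return 0;
 * bpf_wq_set_callback(&e->w, wq_cb, 0);
 * bpf_wq_start(&e->w, 0);
 */
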
struct bpf_iter_kmem_cache;
extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;

#endif
v6.8
#ifndef __BPF_EXPERIMENTAL__
#define __BPF_EXPERIMENTAL__

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))

/* Description
 *	Allocates an object of the type represented by 'local_type_id' in
 *	program BTF. Users may use the bpf_core_type_id_local macro to pass
 *	the type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	A pointer to an object of the type corresponding to the passed-in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_new_impl */
#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	Void.
 */
extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_drop_impl */
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

/* Description
 *	Increment the refcount on a refcounted local kptr, turning the
 *	non-owning reference input into an owning reference in the process.
 *
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	An owning reference to the object pointed to by 'kptr'.
 */
extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_refcount_acquire_impl */
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

/* Description
 *	Add a new entry to the beginning of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier; no
 *	need for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_front_impl(struct bpf_list_head *head,
				    struct bpf_list_node *node,
				    void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_front_impl */
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

/* Description
 *	Add a new entry to the end of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier; no
 *	need for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_back_impl(struct bpf_list_head *head,
				   struct bpf_list_node *node,
				   void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_back_impl */
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

/* Description
 *	Remove the entry at the beginning of the BPF linked list.
 * Returns
 *	Pointer to the bpf_list_node of the removed entry, or NULL if the
 *	list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;

/* Description
 *	Remove the entry at the end of the BPF linked list.
 * Returns
 *	Pointer to the bpf_list_node of the removed entry, or NULL if the
 *	list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;

/* Description
 *	Remove 'node' from the rbtree with root 'root'.
 * Returns
 *	Pointer to the removed node, or NULL if 'root' didn't contain 'node'.
 */
extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
					     struct bpf_rb_node *node) __ksym;

/* Description
 *	Add 'node' to the rbtree with root 'root' using comparator 'less'.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier; no
 *	need for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a tree
 */
extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			       bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			       void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_rbtree_add_impl */
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

/* Description
 *	Return the first (leftmost) node in the input tree.
 * Returns
 *	Pointer to the node, which is _not_ removed from the tree. If the tree
 *	contains no nodes, returns NULL.
 */
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

/* Description
 *	Allocates a percpu object of the type represented by 'local_type_id'
 *	in program BTF. Users may use the bpf_core_type_id_local macro to
 *	pass the type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	A pointer to a percpu object of the type corresponding to the
 *	passed-in 'local_type_id', or NULL on failure.
 */
extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated percpu object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier; no need for the
 *	BPF program to set it.
 * Returns
 *	Void.
 */
extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;

struct bpf_iter_task_vma;

extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
				 struct task_struct *task,
				 unsigned long addr) __ksym;
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_drop_impl */
#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)

/* Description
 *	Throw a BPF exception from the program, immediately terminating its
 *	execution and unwinding the stack. The supplied 'cookie' parameter
 *	will be the return value of the program when an exception is thrown
 *	and the default exception callback is used. Otherwise, if an exception
 *	callback is set using the '__exception_cb(callback)' declaration tag
 *	on the main program, the 'cookie' parameter will be the callback's only
 *	input argument.
 *
 *	Thus, in the case of the default exception callback, 'cookie' is
 *	subjected to the constraints on the program's return value (as with
 *	R0 on exit). Otherwise, the return value of the marked exception
 *	callback will be subjected to the same checks.
 *
 *	Note that throwing an exception with lingering resources (locks,
 *	references, etc.) will lead to a verification error.
 *
 *	Note that callbacks *cannot* call this helper.
 * Returns
 *	Never.
 * Throws
 *	An exception with the specified 'cookie' value.
 */
extern void bpf_throw(u64 cookie) __ksym;

/* This macro must be used to mark the exception callback corresponding to
 * the main program. For example:
 *
 * int exception_cb(u64 cookie) {
 *	return cookie;
 * }
 *
 * SEC("tc")
 * __exception_cb(exception_cb)
 * int main_prog(struct __sk_buff *ctx) {
 *	...
 *	return TC_ACT_OK;
 * }
 *
 * Here, the exception callback for the main program will be 'exception_cb'.
 * Note that this attribute can only be used once, and multiple exception
 * callbacks specified for the main program will lead to a verification
 * error.
 */
#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))

#define __bpf_assert_signed(x) _Generic((x), \
    unsigned long: 0,       \
    unsigned long long: 0,  \
    signed long: 1,         \
    signed long long: 1     \
)

#define __bpf_assert_check(LHS, op, RHS)								 \
	_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression");			 \
	_Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n");			 \
	_Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert");	 \
	_Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")

#define __bpf_assert(LHS, op, cons, RHS, VAL)							\
	({											\
		(void)bpf_throw;								\
		asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw"	\
			       : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : );	\
	})

#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign)			\
	({										\
		__bpf_assert_check(LHS, op, RHS);					\
		if (__bpf_assert_signed(LHS) && !(supp_sign))				\
			__bpf_assert(LHS, "s" #op, cons, RHS, VAL);			\
		else									\
			__bpf_assert(LHS, #op, cons, RHS, VAL);				\
	 })

#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign)					\
	({										\
		if (sizeof(typeof(RHS)) == 8) {						\
			const typeof(RHS) rhs_var = (RHS);				\
			__bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign);	\
		} else {								\
			__bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign);	\
		}									\
	 })

#define __cmp_cannot_be_signed(x) \
	__builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \
	__builtin_strcmp(#x, "&") == 0

#define __is_signed_type(type) (((type)(-1)) < (type)1)

#define __bpf_cmp(LHS, OP, SIGN, PRED, RHS, DEFAULT)						\
	({											\
		__label__ l_true;								\
		bool ret = DEFAULT;								\
		asm volatile goto("if %[lhs] " SIGN #OP " %[rhs] goto %l[l_true]"		\
				  :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true);	\
		ret = !DEFAULT;									\
l_true:												\
		ret;										\
       })

/* C type conversions coupled with a comparison operator are tricky.
 * Make sure the BPF program is compiled with -Wsign-compare; then
 * __lhs OP __rhs below will catch the mistake.
 * Be aware that we check only __lhs to figure out the sign of the compare.
 */
#define _bpf_cmp(LHS, OP, RHS, NOFLIP)								\
	({											\
		typeof(LHS) __lhs = (LHS);							\
		typeof(RHS) __rhs = (RHS);							\
		bool ret;									\
		_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression");	\
		(void)(__lhs OP __rhs);								\
		if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) {		\
			if (sizeof(__rhs) == 8)							\
				ret = __bpf_cmp(__lhs, OP, "", "r", __rhs, NOFLIP);		\
			else									\
				ret = __bpf_cmp(__lhs, OP, "", "i", __rhs, NOFLIP);		\
		} else {									\
			if (sizeof(__rhs) == 8)							\
				ret = __bpf_cmp(__lhs, OP, "s", "r", __rhs, NOFLIP);		\
			else									\
				ret = __bpf_cmp(__lhs, OP, "s", "i", __rhs, NOFLIP);		\
		}										\
		ret;										\
       })

#ifndef bpf_cmp_unlikely
#define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true)
#endif

#ifndef bpf_cmp_likely
#define bpf_cmp_likely(LHS, OP, RHS)								\
	({											\
		bool ret;									\
		if (__builtin_strcmp(#OP, "==") == 0)						\
			ret = _bpf_cmp(LHS, !=, RHS, false);					\
		else if (__builtin_strcmp(#OP, "!=") == 0)					\
			ret = _bpf_cmp(LHS, ==, RHS, false);					\
		else if (__builtin_strcmp(#OP, "<=") == 0)					\
			ret = _bpf_cmp(LHS, >, RHS, false);					\
		else if (__builtin_strcmp(#OP, "<") == 0)					\
			ret = _bpf_cmp(LHS, >=, RHS, false);					\
		else if (__builtin_strcmp(#OP, ">") == 0)					\
			ret = _bpf_cmp(LHS, <=, RHS, false);					\
		else if (__builtin_strcmp(#OP, ">=") == 0)					\
			ret = _bpf_cmp(LHS, <, RHS, false);					\
		else										\
			(void) "bug";								\
		ret;										\
       })
#endif

#ifndef bpf_nop_mov
#define bpf_nop_mov(var) \
	asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))
#endif

/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert(cond) if (!(cond)) bpf_throw(0);

/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_range(LHS, BEG, END)					\
	({								\
		_Static_assert(BEG <= END, "BEG must be <= END");	\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, BEG, 0, false);		\
		__bpf_assert_op(LHS, <=, END, 0, false);		\
	})

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_range_with(LHS, BEG, END, value)			\
	({								\
		_Static_assert(BEG <= END, "BEG must be <= END");	\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, BEG, value, false);		\
		__bpf_assert_op(LHS, <=, END, value, false);		\
	})

struct bpf_iter_css_task;
struct cgroup_subsys_state;
extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
		struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;

struct bpf_iter_task;
extern int bpf_iter_task_new(struct bpf_iter_task *it,
		struct task_struct *task, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;

struct bpf_iter_css;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
				struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

#endif