/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__

/*
 * Note that bpf programs need to include either
 * vmlinux.h (auto-generated from BTF) or linux/types.h
 * in advance since bpf_helper_defs.h uses such types
 * as __u64.
 */
#include "bpf_helper_defs.h"

#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]

/*
 * Helper macro to place programs, maps, license in
 * different sections in elf_bpf file. Section names
 * are interpreted by libbpf depending on the context (BPF programs, BPF maps,
 * extern variables, etc).
 * To allow use of SEC() with externs (e.g., for extern .maps declarations),
 * make sure __attribute__((unused)) doesn't trigger compilation warning.
 */
#if __GNUC__ && !__clang__

/*
 * Pragma macros are broken on GCC
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
 */
#define SEC(name) __attribute__((section(name), used))

#else

#define SEC(name) \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
	__attribute__((section(name), used)) \
	_Pragma("GCC diagnostic pop") \

#endif
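
/*
 * Illustrative sketch (not part of the original header): SEC() placing a
 * map definition and a program into the ELF sections libbpf expects. The
 * map and program names here are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counters SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_prog(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;
 *	}
 *
 *	char LICENSE[] SEC("license") = "GPL";
 */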

/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))

#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif

/*
 * Use __hidden attribute to mark a non-static BPF subprogram effectively
 * static for BPF verifier's verification algorithm purposes, allowing more
 * extensive and permissive BPF verification process, taking into account
 * subprogram's caller context.
 */
#define __hidden __attribute__((visibility("hidden")))

/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
 * any system-level headers (such as stddef.h, linux/version.h, etc), and
 * commonly-used macros like NULL and KERNEL_VERSION aren't available through
 * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
 * them on their own. So as a convenience, provide such definitions here.
 */
#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#endif

/*
 * Helper macros to manipulate data structures
 */

/* offsetof() definition that uses __builtin_offsetof() might not preserve field
 * offset CO-RE relocation properly, so force-redefine offsetof() using the
 * old-school approach which works with CO-RE correctly
 */
#undef offsetof
#define offsetof(type, member) ((unsigned long)&((type *)0)->member)

/* redefine container_of() to ensure we use the above offsetof() macro */
#undef container_of
#define container_of(ptr, type, member)				\
	({							\
		void *__mptr = (void *)(ptr);			\
		((type *)(__mptr - offsetof(type, member)));	\
	})
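
/*
 * Illustrative sketch (not part of the original header): given a pointer
 * to an embedded member, container_of() recovers the enclosing struct.
 * The struct, field, and variable names here are hypothetical.
 *
 *	struct task_ctx {
 *		struct list_head node;
 *		int pid;
 *	};
 *
 *	struct list_head *pos = <...>;
 *	struct task_ctx *t = container_of(pos, struct task_ctx, node);
 */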

/*
 * Compiler (optimization) barrier.
 */
#ifndef barrier
#define barrier() asm volatile("" ::: "memory")
#endif

/* Variable-specific compiler (optimization) barrier. It's a no-op which makes
 * the compiler believe that there is some black box modification of a given
 * variable and thus prevents it from making extra assumptions about its
 * value and potential simplifications and optimizations on this variable.
 *
 * E.g., the compiler might often delay or even omit 32-bit to 64-bit casting
 * of a variable, making some code patterns unverifiable. Putting barrier_var()
 * in place will ensure that the cast is performed before the barrier_var()
 * invocation, because the compiler has to pessimistically assume that the
 * embedded asm section might perform some extra operations on that variable.
 *
 * This is a variable-specific variant of the more global barrier().
 */
#ifndef barrier_var
#define barrier_var(var) asm volatile("" : "+r"(var))
#endif
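
/*
 * Illustrative sketch (not part of the original header): forcing the
 * compiler to materialize a bounds-checked index before it is used, a
 * common pattern to keep the verifier happy. Names and the MAX_ENTRIES
 * constant are hypothetical.
 *
 *	__u32 idx = get_index();
 *
 *	if (idx >= MAX_ENTRIES)
 *		return 0;
 *	barrier_var(idx);	(compiler can't re-derive idx past this point)
 *	val = values[idx];
 */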

/*
 * Helper macro to throw a compilation error if __bpf_unreachable() gets
 * built into the resulting code. This works given the BPF back end does not
 * implement __builtin_trap(). This is useful to assert that certain paths
 * of the program code are never used and hence eliminated by the compiler.
 *
 * For example, consider a switch statement that covers known cases used by
 * the program. __bpf_unreachable() can then reside in the default case. If
 * the program gets extended such that a case is not covered in the switch
 * statement, then it will throw a build error due to the default case not
 * being compiled out.
 */
#ifndef __bpf_unreachable
# define __bpf_unreachable() __builtin_trap()
#endif
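
/*
 * Illustrative sketch (not part of the original header): putting
 * __bpf_unreachable() in a default case to get a build-time error if an
 * unhandled value can ever reach it. The enum values and handlers are
 * hypothetical.
 *
 *	switch (cmd) {
 *	case CMD_READ:  return handle_read();
 *	case CMD_WRITE: return handle_write();
 *	default:        __bpf_unreachable();
 *	}
 */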

/*
 * Helper function to perform a tail call with a constant/immediate map slot.
 */
#if __clang_major__ >= 8 && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
	if (!__builtin_constant_p(slot))
		__bpf_unreachable();

	/*
	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
	 * pointer) and r3 (constant map index) from _different paths_ ending
	 * up at the _same_ call insn as otherwise we won't be able to use the
	 * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
	 * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
	 * tracking for prog array pokes") for details on verifier tracking.
	 *
	 * Note on clobber list: we need to stay in-line with BPF calling
	 * convention, so even if we don't end up using r0, r4, r5, we need
	 * to mark them as clobber so that LLVM doesn't end up using them
	 * before / after the call.
	 */
	asm volatile("r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     "call 12"
		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
		     : "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif
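
/*
 * Illustrative sketch (not part of the original header): tail-calling
 * into a prog array at a compile-time-constant slot. The map and program
 * names are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 2);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("xdp")
 *	int dispatcher(struct xdp_md *ctx)
 *	{
 *		bpf_tail_call_static(ctx, &jmp_table, 0);
 *		return XDP_DROP;	(only reached if the tail call fails)
 *	}
 */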

enum libbpf_pin_type {
	LIBBPF_PIN_NONE,
	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
	LIBBPF_PIN_BY_NAME,
};

enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};

#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))

#define bpf_ksym_exists(sym) ({									\
	_Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak");	\
	!!sym;											\
})
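
/*
 * Illustrative sketch (not part of the original header): gating use of a
 * ksym on its presence in the running kernel. The function name is
 * hypothetical; the __weak marking is what keeps the check from being a
 * compile-time constant.
 *
 *	extern void some_kfunc(void) __weak __ksym;
 *
 *	if (bpf_ksym_exists(some_kfunc))
 *		some_kfunc();
 */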

#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#define __arg_nonnull __attribute__((btf_decl_tag("arg:nonnull")))
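
/*
 * Illustrative sketch (not part of the original header): annotating a
 * global subprogram's arguments so the verifier treats the first as the
 * program context and the second as never-NULL. The function and
 * parameter names are hypothetical.
 *
 *	int process(void *ctx __arg_ctx, int *val __arg_nonnull)
 *	{
 *		return *val;
 *	}
 */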

#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#endif
#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
#ifndef ___bpf_narg
#define ___bpf_narg(...) \
	___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif

#define ___bpf_fill0(arr, p, x) do {} while (0)
#define ___bpf_fill1(arr, p, x) arr[p] = x
#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
#define ___bpf_fill(arr, args...) \
	___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)

/*
 * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
 * in a structure.
 */
#define BPF_SEQ_PRINTF(seq, fmt, args...)			\
({								\
	static const char ___fmt[] = fmt;			\
	unsigned long long ___param[___bpf_narg(args)];		\
								\
	_Pragma("GCC diagnostic push")				\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
	___bpf_fill(___param, args);				\
	_Pragma("GCC diagnostic pop")				\
								\
	bpf_seq_printf(seq, ___fmt, sizeof(___fmt),		\
		       ___param, sizeof(___param));		\
})
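
/*
 * Illustrative sketch (not part of the original header): emitting one
 * line per task from a task iterator program. Context field names follow
 * the usual vmlinux.h definitions but are shown for illustration only.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d %s\n",
 *				       task->pid, task->comm);
 *		return 0;
 *	}
 */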

/*
 * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
 * an array of u64.
 */
#define BPF_SNPRINTF(out, out_size, fmt, args...)		\
({								\
	static const char ___fmt[] = fmt;			\
	unsigned long long ___param[___bpf_narg(args)];		\
								\
	_Pragma("GCC diagnostic push")				\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
	___bpf_fill(___param, args);				\
	_Pragma("GCC diagnostic pop")				\
								\
	bpf_snprintf(out, out_size, ___fmt,			\
		     ___param, sizeof(___param));		\
})
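
/*
 * Illustrative sketch (not part of the original header): formatting into
 * a stack buffer; the macro's value is bpf_snprintf()'s return value, so
 * it can be checked for errors. Variable names are hypothetical.
 *
 *	char buf[64];
 *	long n;
 *
 *	n = BPF_SNPRINTF(buf, sizeof(buf), "pid=%d comm=%s", pid, comm);
 *	if (n < 0)
 *		return 0;	(formatting failed)
 */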

#ifdef BPF_NO_GLOBAL_DATA
#define BPF_PRINTK_FMT_MOD
#else
#define BPF_PRINTK_FMT_MOD static const
#endif

#define __bpf_printk(fmt, ...)				\
({							\
	BPF_PRINTK_FMT_MOD char ____fmt[] = fmt;	\
	bpf_trace_printk(____fmt, sizeof(____fmt),	\
			 ##__VA_ARGS__);		\
})

/*
 * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
 * instead of an array of u64.
 */
#define __bpf_vprintk(fmt, args...)				\
({								\
	static const char ___fmt[] = fmt;			\
	unsigned long long ___param[___bpf_narg(args)];		\
								\
	_Pragma("GCC diagnostic push")				\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
	___bpf_fill(___param, args);				\
	_Pragma("GCC diagnostic pop")				\
								\
	bpf_trace_vprintk(___fmt, sizeof(___fmt),		\
			  ___param, sizeof(___param));		\
})

/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
 * Otherwise use __bpf_vprintk
 */
#define ___bpf_pick_printk(...) \
	___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk,	\
		   __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk,		\
		   __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
		   __bpf_printk /*1*/, __bpf_printk /*0*/)

/* Helper macro to print out debug messages */
#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
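
/*
 * Illustrative sketch (not part of the original header): bpf_printk()
 * transparently picks __bpf_printk() for up to 3 arguments and
 * __bpf_vprintk() beyond that; output is readable from the kernel's
 * trace_pipe (e.g., /sys/kernel/debug/tracing/trace_pipe).
 *
 *	bpf_printk("pid=%d", pid);			(1 arg, bpf_trace_printk)
 *	bpf_printk("a=%d b=%d c=%d d=%d", a, b, c, d);	(4 args, bpf_trace_vprintk)
 */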

struct bpf_iter_num;

extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;

#ifndef bpf_for_each
/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
 * using BPF open-coded iterators without having to write mundane explicit
 * low-level loop logic. Instead, it provides for()-like generic construct
 * that can be used pretty naturally. E.g., for some hypothetical cgroup
 * iterator, you'd write:
 *
 * struct cgroup *cg, *parent_cg = <...>;
 *
 * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
 *     bpf_printk("Child cgroup id = %d", cg->cgroup_id);
 *     if (cg->cgroup_id == 123)
 *         break;
 * }
 *
 * I.e., it looks almost like high-level for each loop in other languages,
 * supports continue/break, and is verifiable by BPF verifier.
 *
 * For iterating integers, the difference between bpf_for_each(num, i, N, M)
 * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
 * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
 * *`, not just `int`. So for integers bpf_for() is more convenient.
 *
 * Note: this macro relies on C99 feature of allowing to declare variables
 * inside for() loop, bound to for() loop lifetime. It also utilizes GCC
 * extension: __attribute__((cleanup(<func>))), supported by both GCC and
 * Clang.
 */
#define bpf_for_each(type, cur, args...) for (							\
	/* initialize and define destructor */							\
	struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */	\
						    cleanup(bpf_iter_##type##_destroy))),	\
	/* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */	\
	*___p __attribute__((unused)) = (							\
		bpf_iter_##type##_new(&___it, ##args),						\
	/* this is a workaround for Clang bug: it currently doesn't emit BTF */		\
	/* for bpf_iter_##type##_destroy() when used from cleanup() attribute */		\
		(void)bpf_iter_##type##_destroy, (void *)0);					\
	/* iteration and termination check */							\
	(((cur) = bpf_iter_##type##_next(&___it)));						\
)
#endif /* bpf_for_each */
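
/*
 * Illustrative sketch (not part of the original header): using the
 * kernel-provided `num` iterator with bpf_for_each(); note that `cur` is
 * an `int *` here, unlike with bpf_for() below.
 *
 *	int *cur;
 *
 *	bpf_for_each(num, cur, 0, 10) {
 *		bpf_printk("i=%d", *cur);
 *	}
 */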

#ifndef bpf_for
/* bpf_for(i, start, end) implements a for()-like looping construct that sets
 * provided integer variable *i* to values starting from *start* through,
 * but not including, *end*. It also proves to BPF verifier that *i* belongs
 * to range [start, end), so this can be used for accessing arrays without
 * extra checks.
 *
 * Note: *start* and *end* are assumed to be expressions with no side effects
 * and whose values do not change throughout bpf_for() loop execution. They do
 * not have to be statically known or constant, though.
 *
 * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
 * loop bound variables and cleanup attribute, supported by GCC and Clang.
 */
#define bpf_for(i, start, end) for (								\
	/* initialize and define destructor */							\
	struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */	\
						 cleanup(bpf_iter_num_destroy))),		\
	/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */	\
	*___p __attribute__((unused)) = (							\
		bpf_iter_num_new(&___it, (start), (end)),					\
	/* this is a workaround for Clang bug: it currently doesn't emit BTF */		\
	/* for bpf_iter_num_destroy() when used from cleanup() attribute */			\
		(void)bpf_iter_num_destroy, (void *)0);						\
	({											\
		/* iteration step */								\
		int *___t = bpf_iter_num_next(&___it);						\
		/* termination and bounds check */						\
		(___t && ((i) = *___t, (i) >= (start) && (i) < (end)));			\
	});											\
)
#endif /* bpf_for */
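
/*
 * Illustrative sketch (not part of the original header): because bpf_for()
 * proves i stays within [0, ARRAY_SZ), the array access below needs no
 * extra bounds check. The array name and ARRAY_SZ constant are
 * hypothetical.
 *
 *	__u64 vals[ARRAY_SZ];
 *	int i;
 *
 *	bpf_for(i, 0, ARRAY_SZ) {
 *		vals[i] = 0;
 *	}
 */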

#ifndef bpf_repeat
/* bpf_repeat(N) performs N iterations without exposing iteration number
 *
 * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
 * loop bound variables and cleanup attribute, supported by GCC and Clang.
 */
#define bpf_repeat(N) for (									\
	/* initialize and define destructor */							\
	struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */	\
						 cleanup(bpf_iter_num_destroy))),		\
	/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */	\
	*___p __attribute__((unused)) = (							\
		bpf_iter_num_new(&___it, 0, (N)),						\
	/* this is a workaround for Clang bug: it currently doesn't emit BTF */		\
	/* for bpf_iter_num_destroy() when used from cleanup() attribute */			\
		(void)bpf_iter_num_destroy, (void *)0);						\
	bpf_iter_num_next(&___it);								\
	/* nothing here */									\
)
#endif /* bpf_repeat */
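
/*
 * Illustrative sketch (not part of the original header): a bounded retry
 * loop with no loop variable; break exits early as in a normal for()
 * loop. try_something() is hypothetical.
 *
 *	bpf_repeat(5) {
 *		if (try_something() == 0)
 *			break;
 *	}
 */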

#endif /* __BPF_HELPERS__ */