1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3
4#include <vmlinux.h>
5#include <bpf/bpf_helpers.h>
6#include <bpf/bpf_tracing.h>
7#include "bpf_misc.h"
8#include "xdp_metadata.h"
9#include "bpf_kfuncs.h"
10#include "err.h"
11
12/* The compiler may be able to detect the access to uninitialized
13 memory in the routines performing out of bound memory accesses and
14 emit warnings about it. This is the case of GCC. */
15#if !defined(__clang__)
16#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
17#endif
18
19int arr[1];
20int unkn_idx;
21const volatile bool call_dead_subprog = false;
22
23__noinline long global_bad(void)
24{
25 return arr[unkn_idx]; /* BOOM */
26}
27
28__noinline long global_good(void)
29{
30 return arr[0];
31}
32
33__noinline long global_calls_bad(void)
34{
35 return global_good() + global_bad() /* does BOOM indirectly */;
36}
37
38__noinline long global_calls_good_only(void)
39{
40 return global_good();
41}
42
43__noinline long global_dead(void)
44{
45 return arr[0] * 2;
46}
47
48SEC("?raw_tp")
49__success __log_level(2)
50/* main prog is validated completely first */
51__msg("('global_calls_good_only') is global and assumed valid.")
52/* eventually global_good() is transitively validated as well */
53__msg("Validating global_good() func")
54__msg("('global_good') is safe for any args that match its prototype")
55int chained_global_func_calls_success(void)
56{
57 int sum = 0;
58
59 if (call_dead_subprog)
60 sum += global_dead();
61 return global_calls_good_only() + sum;
62}
63
64SEC("?raw_tp")
65__failure __log_level(2)
66/* main prog validated successfully first */
67__msg("('global_calls_bad') is global and assumed valid.")
68/* eventually we validate global_bad() and fail */
69__msg("Validating global_bad() func")
70__msg("math between map_value pointer and register") /* BOOM */
71int chained_global_func_calls_bad(void)
72{
73 return global_calls_bad();
74}
75
76/* do out of bounds access forcing verifier to fail verification if this
77 * global func is called
78 */
79__noinline int global_unsupp(const int *mem)
80{
81 if (!mem)
82 return 0;
83 return mem[100]; /* BOOM */
84}
85
86const volatile bool skip_unsupp_global = true;
87
88SEC("?raw_tp")
89__success
90int guarded_unsupp_global_called(void)
91{
92 if (!skip_unsupp_global)
93 return global_unsupp(NULL);
94 return 0;
95}
96
97SEC("?raw_tp")
98__failure __log_level(2)
99__msg("Func#1 ('global_unsupp') is global and assumed valid.")
100__msg("Validating global_unsupp() func#1...")
101__msg("value is outside of the allowed memory range")
102int unguarded_unsupp_global_called(void)
103{
104 int x = 0;
105
106 return global_unsupp(&x);
107}
108
109long stack[128];
110
111__weak int subprog_nullable_ptr_bad(int *p)
112{
113 return (*p) * 2; /* bad, missing null check */
114}
115
116SEC("?raw_tp")
117__failure __log_level(2)
118__msg("invalid mem access 'mem_or_null'")
119int arg_tag_nullable_ptr_fail(void *ctx)
120{
121 int x = 42;
122
123 return subprog_nullable_ptr_bad(&x);
124}
125
126typedef struct {
127 int x;
128} user_struct_t;
129
130__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
131{
132 return t ? t->x : 0;
133}
134
135SEC("?tracepoint")
136__failure __log_level(2)
137__msg("invalid bpf_context access")
138__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
139int anon_user_mem_invalid(void *ctx)
140{
141 /* can't pass PTR_TO_CTX as user memory */
142 return subprog_user_anon_mem(ctx);
143}
144
145SEC("?tracepoint")
146__success __log_level(2)
147__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
148int anon_user_mem_valid(void *ctx)
149{
150 user_struct_t t = { .x = 42 };
151
152 return subprog_user_anon_mem(&t);
153}
154
155__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
156{
157 return (*p1) * (*p2); /* good, no need for NULL checks */
158}
159
160int x = 47;
161
162SEC("?raw_tp")
163__success __log_level(2)
164int arg_tag_nonnull_ptr_good(void *ctx)
165{
166 int y = 74;
167
168 return subprog_nonnull_ptr_good(&x, &y);
169}
170
171/* this global subprog can be now called from many types of entry progs, each
172 * with different context type
173 */
174__weak int subprog_ctx_tag(void *ctx __arg_ctx)
175{
176 return bpf_get_stack(ctx, stack, sizeof(stack), 0);
177}
178
179__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
180{
181 return 0;
182}
183
184__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
185{
186 return 0;
187}
188
189SEC("?raw_tp")
190__success __log_level(2)
191int arg_tag_ctx_raw_tp(void *ctx)
192{
193 return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
194}
195
196SEC("?raw_tp.w")
197__success __log_level(2)
198int arg_tag_ctx_raw_tp_writable(void *ctx)
199{
200 return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
201}
202
203SEC("?tp_btf/sys_enter")
204__success __log_level(2)
205int arg_tag_ctx_raw_tp_btf(void *ctx)
206{
207 return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
208}
209
210struct whatever { };
211
212__weak int tp_whatever(struct whatever *ctx __arg_ctx)
213{
214 return 0;
215}
216
217SEC("?tp")
218__success __log_level(2)
219int arg_tag_ctx_tp(void *ctx)
220{
221 return subprog_ctx_tag(ctx) + tp_whatever(ctx);
222}
223
224__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
225{
226 return 0;
227}
228
229__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
230{
231 return 0;
232}
233
234SEC("?kprobe")
235__success __log_level(2)
236int arg_tag_ctx_kprobe(void *ctx)
237{
238 return subprog_ctx_tag(ctx) +
239 kprobe_subprog_pt_regs(ctx) +
240 kprobe_subprog_typedef(ctx);
241}
242
243__weak int perf_subprog_regs(
244#if defined(bpf_target_riscv)
245 struct user_regs_struct *ctx __arg_ctx
246#elif defined(bpf_target_s390)
247 /* user_pt_regs typedef is anonymous struct, so only `void *` works */
248 void *ctx __arg_ctx
249#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
250 struct user_pt_regs *ctx __arg_ctx
251#else
252 struct pt_regs *ctx __arg_ctx
253#endif
254)
255{
256 return 0;
257}
258
259__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
260{
261 return 0;
262}
263
264__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
265{
266 return 0;
267}
268
269SEC("?perf_event")
270__success __log_level(2)
271int arg_tag_ctx_perf(void *ctx)
272{
273 return subprog_ctx_tag(ctx) +
274 perf_subprog_regs(ctx) +
275 perf_subprog_typedef(ctx) +
276 perf_subprog_canonical(ctx);
277}
278
279__weak int iter_subprog_void(void *ctx __arg_ctx)
280{
281 return 0;
282}
283
284__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
285{
286 return 0;
287}
288
289SEC("?iter/task")
290__success __log_level(2)
291int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
292{
293 return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
294}
295
296__weak int tracing_subprog_void(void *ctx __arg_ctx)
297{
298 return 0;
299}
300
301__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
302{
303 return 0;
304}
305
306int acc;
307
308SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
309__success __log_level(2)
310int BPF_PROG(arg_tag_ctx_fentry)
311{
312 acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
313 return 0;
314}
315
316SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
317__success __log_level(2)
318int BPF_PROG(arg_tag_ctx_fexit)
319{
320 acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
321 return 0;
322}
323
324SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
325__success __log_level(2)
326int BPF_PROG(arg_tag_ctx_fmod_ret)
327{
328 return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
329}
330
331SEC("?lsm/bpf")
332__success __log_level(2)
333int BPF_PROG(arg_tag_ctx_lsm)
334{
335 int ret;
336
337 ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
338 set_if_not_errno_or_zero(ret, -1);
339 return ret;
340}
341
342SEC("?struct_ops/test_1")
343__success __log_level(2)
344int BPF_PROG(arg_tag_ctx_struct_ops)
345{
346 return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
347}
348
349SEC(".struct_ops")
350struct bpf_dummy_ops dummy_1 = {
351 .test_1 = (void *)arg_tag_ctx_struct_ops,
352};
353
354SEC("?syscall")
355__success __log_level(2)
356int arg_tag_ctx_syscall(void *ctx)
357{
358 return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
359}
360
361__weak int subprog_dynptr(struct bpf_dynptr *dptr)
362{
363 long *d, t, buf[1] = {};
364
365 d = bpf_dynptr_data(dptr, 0, sizeof(long));
366 if (!d)
367 return 0;
368
369 t = *d + 1;
370
371 d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
372 if (!d)
373 return t;
374
375 t = *d + 2;
376
377 return t;
378}
379
380SEC("?xdp")
381__success __log_level(2)
382int arg_tag_dynptr(struct xdp_md *ctx)
383{
384 struct bpf_dynptr dptr;
385
386 bpf_dynptr_from_xdp(ctx, 0, &dptr);
387
388 return subprog_dynptr(&dptr);
389}
390
391char _license[] SEC("license") = "GPL";
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3
4#include <vmlinux.h>
5#include <bpf/bpf_helpers.h>
6#include <bpf/bpf_tracing.h>
7#include "bpf_misc.h"
8#include "xdp_metadata.h"
9#include "bpf_kfuncs.h"
10
11int arr[1];
12int unkn_idx;
13const volatile bool call_dead_subprog = false;
14
15__noinline long global_bad(void)
16{
17 return arr[unkn_idx]; /* BOOM */
18}
19
20__noinline long global_good(void)
21{
22 return arr[0];
23}
24
25__noinline long global_calls_bad(void)
26{
27 return global_good() + global_bad() /* does BOOM indirectly */;
28}
29
30__noinline long global_calls_good_only(void)
31{
32 return global_good();
33}
34
35__noinline long global_dead(void)
36{
37 return arr[0] * 2;
38}
39
40SEC("?raw_tp")
41__success __log_level(2)
42/* main prog is validated completely first */
43__msg("('global_calls_good_only') is global and assumed valid.")
44/* eventually global_good() is transitively validated as well */
45__msg("Validating global_good() func")
46__msg("('global_good') is safe for any args that match its prototype")
47int chained_global_func_calls_success(void)
48{
49 int sum = 0;
50
51 if (call_dead_subprog)
52 sum += global_dead();
53 return global_calls_good_only() + sum;
54}
55
56SEC("?raw_tp")
57__failure __log_level(2)
58/* main prog validated successfully first */
59__msg("('global_calls_bad') is global and assumed valid.")
60/* eventually we validate global_bad() and fail */
61__msg("Validating global_bad() func")
62__msg("math between map_value pointer and register") /* BOOM */
63int chained_global_func_calls_bad(void)
64{
65 return global_calls_bad();
66}
67
68/* do out of bounds access forcing verifier to fail verification if this
69 * global func is called
70 */
71__noinline int global_unsupp(const int *mem)
72{
73 if (!mem)
74 return 0;
75 return mem[100]; /* BOOM */
76}
77
78const volatile bool skip_unsupp_global = true;
79
80SEC("?raw_tp")
81__success
82int guarded_unsupp_global_called(void)
83{
84 if (!skip_unsupp_global)
85 return global_unsupp(NULL);
86 return 0;
87}
88
89SEC("?raw_tp")
90__failure __log_level(2)
91__msg("Func#1 ('global_unsupp') is global and assumed valid.")
92__msg("Validating global_unsupp() func#1...")
93__msg("value is outside of the allowed memory range")
94int unguarded_unsupp_global_called(void)
95{
96 int x = 0;
97
98 return global_unsupp(&x);
99}
100
101long stack[128];
102
103__weak int subprog_nullable_ptr_bad(int *p)
104{
105 return (*p) * 2; /* bad, missing null check */
106}
107
108SEC("?raw_tp")
109__failure __log_level(2)
110__msg("invalid mem access 'mem_or_null'")
111int arg_tag_nullable_ptr_fail(void *ctx)
112{
113 int x = 42;
114
115 return subprog_nullable_ptr_bad(&x);
116}
117
118__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
119{
120 return (*p1) * (*p2); /* good, no need for NULL checks */
121}
122
123int x = 47;
124
125SEC("?raw_tp")
126__success __log_level(2)
127int arg_tag_nonnull_ptr_good(void *ctx)
128{
129 int y = 74;
130
131 return subprog_nonnull_ptr_good(&x, &y);
132}
133
134/* this global subprog can be now called from many types of entry progs, each
135 * with different context type
136 */
137__weak int subprog_ctx_tag(void *ctx __arg_ctx)
138{
139 return bpf_get_stack(ctx, stack, sizeof(stack), 0);
140}
141
142__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
143{
144 return 0;
145}
146
147__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
148{
149 return 0;
150}
151
152SEC("?raw_tp")
153__success __log_level(2)
154int arg_tag_ctx_raw_tp(void *ctx)
155{
156 return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
157}
158
159SEC("?raw_tp.w")
160__success __log_level(2)
161int arg_tag_ctx_raw_tp_writable(void *ctx)
162{
163 return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
164}
165
166SEC("?tp_btf/sys_enter")
167__success __log_level(2)
168int arg_tag_ctx_raw_tp_btf(void *ctx)
169{
170 return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
171}
172
173struct whatever { };
174
175__weak int tp_whatever(struct whatever *ctx __arg_ctx)
176{
177 return 0;
178}
179
180SEC("?tp")
181__success __log_level(2)
182int arg_tag_ctx_tp(void *ctx)
183{
184 return subprog_ctx_tag(ctx) + tp_whatever(ctx);
185}
186
187__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
188{
189 return 0;
190}
191
192__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
193{
194 return 0;
195}
196
197SEC("?kprobe")
198__success __log_level(2)
199int arg_tag_ctx_kprobe(void *ctx)
200{
201 return subprog_ctx_tag(ctx) +
202 kprobe_subprog_pt_regs(ctx) +
203 kprobe_subprog_typedef(ctx);
204}
205
206__weak int perf_subprog_regs(
207#if defined(bpf_target_riscv)
208 struct user_regs_struct *ctx __arg_ctx
209#elif defined(bpf_target_s390)
210 /* user_pt_regs typedef is anonymous struct, so only `void *` works */
211 void *ctx __arg_ctx
212#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
213 struct user_pt_regs *ctx __arg_ctx
214#else
215 struct pt_regs *ctx __arg_ctx
216#endif
217)
218{
219 return 0;
220}
221
222__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
223{
224 return 0;
225}
226
227__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
228{
229 return 0;
230}
231
232SEC("?perf_event")
233__success __log_level(2)
234int arg_tag_ctx_perf(void *ctx)
235{
236 return subprog_ctx_tag(ctx) +
237 perf_subprog_regs(ctx) +
238 perf_subprog_typedef(ctx) +
239 perf_subprog_canonical(ctx);
240}
241
242__weak int iter_subprog_void(void *ctx __arg_ctx)
243{
244 return 0;
245}
246
247__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
248{
249 return 0;
250}
251
252SEC("?iter/task")
253__success __log_level(2)
254int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
255{
256 return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
257}
258
259__weak int tracing_subprog_void(void *ctx __arg_ctx)
260{
261 return 0;
262}
263
264__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
265{
266 return 0;
267}
268
269int acc;
270
271SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
272__success __log_level(2)
273int BPF_PROG(arg_tag_ctx_fentry)
274{
275 acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
276 return 0;
277}
278
279SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
280__success __log_level(2)
281int BPF_PROG(arg_tag_ctx_fexit)
282{
283 acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
284 return 0;
285}
286
287SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
288__success __log_level(2)
289int BPF_PROG(arg_tag_ctx_fmod_ret)
290{
291 return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
292}
293
294SEC("?lsm/bpf")
295__success __log_level(2)
296int BPF_PROG(arg_tag_ctx_lsm)
297{
298 return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
299}
300
301SEC("?struct_ops/test_1")
302__success __log_level(2)
303int BPF_PROG(arg_tag_ctx_struct_ops)
304{
305 return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
306}
307
308SEC(".struct_ops")
309struct bpf_dummy_ops dummy_1 = {
310 .test_1 = (void *)arg_tag_ctx_struct_ops,
311};
312
313SEC("?syscall")
314__success __log_level(2)
315int arg_tag_ctx_syscall(void *ctx)
316{
317 return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
318}
319
320__weak int subprog_dynptr(struct bpf_dynptr *dptr)
321{
322 long *d, t, buf[1] = {};
323
324 d = bpf_dynptr_data(dptr, 0, sizeof(long));
325 if (!d)
326 return 0;
327
328 t = *d + 1;
329
330 d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
331 if (!d)
332 return t;
333
334 t = *d + 2;
335
336 return t;
337}
338
339SEC("?xdp")
340__success __log_level(2)
341int arg_tag_dynptr(struct xdp_md *ctx)
342{
343 struct bpf_dynptr dptr;
344
345 bpf_dynptr_from_xdp(ctx, 0, &dptr);
346
347 return subprog_dynptr(&dptr);
348}
349
350char _license[] SEC("license") = "GPL";