// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"
#include "err.h"

/* The compiler may be able to detect the access to uninitialized
   memory in the routines performing out-of-bounds memory accesses and
   emit warnings about it. GCC, in particular, does so. */
#if !defined(__clang__)
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif

int arr[1];
int unkn_idx;
const volatile bool call_dead_subprog = false;

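/* Global variables like arr and unkn_idx are backed by internal array
 * maps (.data/.bss), so the verifier sees the arr[unkn_idx] access as
 * map_value pointer arithmetic with an unknown register value and
 * rejects it.
 */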
__noinline long global_bad(void)
{
	return arr[unkn_idx]; /* BOOM */
}

__noinline long global_good(void)
{
	return arr[0];
}

__noinline long global_calls_bad(void)
{
	return global_good() + global_bad() /* does BOOM indirectly */;
}

__noinline long global_calls_good_only(void)
{
	return global_good();
}

__noinline long global_dead(void)
{
	return arr[0] * 2;
}

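/* __success/__failure, __log_level() and __msg() below are annotations
 * from bpf_misc.h; the selftest loader uses them to check the expected
 * verification outcome and to match substrings of the verifier log.
 */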
SEC("?raw_tp")
__success __log_level(2)
/* main prog is validated completely first */
__msg("('global_calls_good_only') is global and assumed valid.")
/* eventually global_good() is transitively validated as well */
__msg("Validating global_good() func")
__msg("('global_good') is safe for any args that match its prototype")
int chained_global_func_calls_success(void)
{
	int sum = 0;

	if (call_dead_subprog)
		sum += global_dead();
	return global_calls_good_only() + sum;
}

SEC("?raw_tp")
__failure __log_level(2)
/* main prog validated successfully first */
__msg("('global_calls_bad') is global and assumed valid.")
/* eventually we validate global_bad() and fail */
__msg("Validating global_bad() func")
__msg("math between map_value pointer and register") /* BOOM */
int chained_global_func_calls_bad(void)
{
	return global_calls_bad();
}

/* Do an out-of-bounds access, forcing the verifier to fail verification
 * if this global func is ever called.
 */
__noinline int global_unsupp(const int *mem)
{
	if (!mem)
		return 0;
	return mem[100]; /* BOOM */
}

const volatile bool skip_unsupp_global = true;

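/* skip_unsupp_global is const volatile, so its value is known to the
 * verifier at load time: the guarded call below is dead code and
 * global_unsupp() never has to be validated, letting the program load.
 */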
SEC("?raw_tp")
__success
int guarded_unsupp_global_called(void)
{
	if (!skip_unsupp_global)
		return global_unsupp(NULL);
	return 0;
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("Func#1 ('global_unsupp') is global and assumed valid.")
__msg("Validating global_unsupp() func#1...")
__msg("value is outside of the allowed memory range")
int unguarded_unsupp_global_called(void)
{
	int x = 0;

	return global_unsupp(&x);
}

long stack[128];

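/* A plain int * argument of a global subprog is treated as a pointer
 * that may be NULL (mem_or_null), so dereferencing it without a NULL
 * check is rejected with "invalid mem access 'mem_or_null'".
 */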
__weak int subprog_nullable_ptr_bad(int *p)
{
	return (*p) * 2; /* bad, missing null check */
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("invalid mem access 'mem_or_null'")
int arg_tag_nullable_ptr_fail(void *ctx)
{
	int x = 42;

	return subprog_nullable_ptr_bad(&x);
}

typedef struct {
	int x;
} user_struct_t;

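/* user_struct_t is a typedef of an anonymous struct, so the argument
 * below is treated as a pointer to plain memory of that size rather
 * than a BTF-typed object; a PTR_TO_CTX cannot be passed as such
 * memory, which is what the failing test below checks.
 */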
__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
{
	return t ? t->x : 0;
}

SEC("?tracepoint")
__failure __log_level(2)
__msg("invalid bpf_context access")
__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
int anon_user_mem_invalid(void *ctx)
{
	/* can't pass PTR_TO_CTX as user memory */
	return subprog_user_anon_mem(ctx);
}

SEC("?tracepoint")
__success __log_level(2)
__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
int anon_user_mem_valid(void *ctx)
{
	user_struct_t t = { .x = 42 };

	return subprog_user_anon_mem(&t);
}

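/* __arg_nonnull is a btf_decl_tag provided by libbpf's bpf_helpers.h.
 * It promises the verifier that callers always pass a non-NULL pointer,
 * so the subprog body may dereference its arguments without NULL checks
 * while callers are checked for never passing NULL.
 */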
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
	return (*p1) * (*p2); /* good, no need for NULL checks */
}

int x = 47;

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_nonnull_ptr_good(void *ctx)
{
	int y = 74;

	return subprog_nonnull_ptr_good(&x, &y);
}

/* This global subprog can now be called from many types of entry progs,
 * each with a different context type.
 */
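/* __arg_ctx (also a bpf_helpers.h decl tag) tells the verifier that the
 * argument is the program's context pointer, regardless of the type it
 * is declared with, so one global subprog can accept the context of any
 * program type that calls it.
 */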
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}

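/* For raw_tp programs the canonical context type is
 * struct bpf_raw_tracepoint_args; a pointer to a u64 array is accepted
 * as an equivalent spelling.
 */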
__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
{
	return 0;
}

__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
{
	return 0;
}

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?raw_tp.w")
__success __log_level(2)
int arg_tag_ctx_raw_tp_writable(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?tp_btf/sys_enter")
__success __log_level(2)
int arg_tag_ctx_raw_tp_btf(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

struct whatever { };

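/* Classic tracepoint context layout depends on the specific tracepoint,
 * so with __arg_ctx a placeholder type like struct whatever can stand
 * in for the real context type.
 */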
__weak int tp_whatever(struct whatever *ctx __arg_ctx)
{
	return 0;
}

SEC("?tp")
__success __log_level(2)
int arg_tag_ctx_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + tp_whatever(ctx);
}

__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
{
	return 0;
}

__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

SEC("?kprobe")
__success __log_level(2)
int arg_tag_ctx_kprobe(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       kprobe_subprog_pt_regs(ctx) +
	       kprobe_subprog_typedef(ctx);
}

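/* For perf_event programs the registers inside the context are
 * described by a per-architecture struct, so the expected type has to
 * be spelled out arch by arch below; bpf_user_pt_regs_t and
 * struct bpf_perf_event_data are arch-independent alternatives.
 */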
__weak int perf_subprog_regs(
#if defined(bpf_target_riscv)
	struct user_regs_struct *ctx __arg_ctx
#elif defined(bpf_target_s390)
	/* user_pt_regs typedef is anonymous struct, so only `void *` works */
	void *ctx __arg_ctx
#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
	struct user_pt_regs *ctx __arg_ctx
#else
	struct pt_regs *ctx __arg_ctx
#endif
)
{
	return 0;
}

__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
{
	return 0;
}

SEC("?perf_event")
__success __log_level(2)
int arg_tag_ctx_perf(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       perf_subprog_regs(ctx) +
	       perf_subprog_typedef(ctx) +
	       perf_subprog_canonical(ctx);
}

__weak int iter_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
{
	return 0;
}

SEC("?iter/task")
__success __log_level(2)
int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
{
	return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
}

__weak int tracing_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
{
	return 0;
}

int acc;

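/* SYS_PREFIX (from bpf_misc.h) expands to the arch-specific syscall
 * symbol prefix (e.g. "__x64_" on x86-64), and BPF_PROG() from
 * bpf_tracing.h wraps the raw u64 *ctx so it can be referenced as
 * `ctx` in the bodies below.
 */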
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fentry)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fexit)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fmod_ret)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

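/* lsm/bpf must return 0 or a negative errno; set_if_not_errno_or_zero()
 * (from bpf_misc.h) forces any other return value to -1 so the
 * verifier's return-value check on the LSM hook passes.
 */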
SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
	int ret;

	ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	set_if_not_errno_or_zero(ret, -1);
	return ret;
}

SEC("?struct_ops/test_1")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_struct_ops)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

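/* Register the struct_ops prog above as the test_1 callback of
 * bpf_dummy_ops so it gets loaded and verified with a struct_ops
 * context.
 */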
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = (void *)arg_tag_ctx_struct_ops,
};

SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall(void *ctx)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}

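/* A struct bpf_dynptr * argument of a global subprog is recognized as a
 * dynptr: bpf_dynptr_data() returns a direct pointer into the dynptr's
 * data, while the bpf_dynptr_slice() kfunc may instead copy the
 * requested bytes into the supplied buffer and return a pointer to it.
 */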
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
	long *d, t, buf[1] = {};

	d = bpf_dynptr_data(dptr, 0, sizeof(long));
	if (!d)
		return 0;

	t = *d + 1;

	d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
	if (!d)
		return t;

	t = *d + 2;

	return t;
}

SEC("?xdp")
__success __log_level(2)
int arg_tag_dynptr(struct xdp_md *ctx)
{
	struct bpf_dynptr dptr;

	bpf_dynptr_from_xdp(ctx, 0, &dptr);

	return subprog_dynptr(&dptr);
}

char _license[] SEC("license") = "GPL";