Linux Audio — kernel source browser

Check our new training course

v6.13.7 (first of two versions of this file shown below)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
  3
  4#include <vmlinux.h>
  5#include <bpf/bpf_tracing.h>
  6#include <bpf/bpf_helpers.h>
  7
  8#include "../bpf_experimental.h"
  9#include "task_kfunc_common.h"
 10
char _license[] SEC("license") = "GPL";

/* err reports test failures back to userspace; pid filters trace events to
 * the test-runner process.
 */
int err, pid;

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *         TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

/* Declared __weak so the object still loads on kernels lacking the kfunc;
 * existence is checked at runtime with bpf_ksym_exists().
 */
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;

/* ___one flavor matches the real single-param bpf_task_acquire signature. */
struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
/* The two-param bpf_task_acquire doesn't exist */
struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
/* Incorrect type for first param */
struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;

/* Deliberately nonexistent symbol; bpf_ksym_exists() must report false. */
void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
 31
 32static bool is_test_kfunc_task(void)
 33{
 34	int cur_pid = bpf_get_current_pid_tgid() >> 32;
 35
 36	return pid == cur_pid;
 37}
 38
/* Verify the weak ksyms resolved as expected, then acquire and immediately
 * release a reference on @task.  Failures are reported through err (3-6).
 */
static int test_acquire_release(struct task_struct *task)
{
	struct task_struct *acquired = NULL;

	if (!bpf_ksym_exists(bpf_task_acquire)) {
		err = 3;
		return 0;
	}
	if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
		err = 4;
		return 0;
	}
	if (bpf_ksym_exists(invalid_kfunc)) {
		/* the verifier's dead code elimination should remove this */
		err = 5;
		asm volatile ("goto -1"); /* for (;;); */
	}

	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 6;

	return 0;
}
 65
/* Exercise libbpf's ___flavor ksym resolution: only ___one matches the real
 * bpf_task_acquire signature, so only its branch should survive resolution.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired = NULL;
	int fake_ctx = 42;

	if (bpf_ksym_exists(bpf_task_acquire___one)) {
		acquired = bpf_task_acquire___one(task);
	} else if (bpf_ksym_exists(bpf_task_acquire___two)) {
		/* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
		 * call will find vmlinux's bpf_task_acquire, but subsequent
		 * bpf_core_types_are_compat will fail
		 */
		acquired = bpf_task_acquire___two(task, &fake_ctx);
		err = 3;
		return 0;
	} else if (bpf_ksym_exists(bpf_task_acquire___three)) {
		/* bpf_core_types_are_compat will fail similarly to above case */
		acquired = bpf_task_acquire___three(&fake_ctx);
		err = 4;
		return 0;
	}

	if (acquired)
		bpf_task_release(acquired);
	else
		err = 5;
	return 0;
}
 95
/* Both flavors mismatch the real kfunc; neither ksym may resolve. */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
{
	/* Neither symbol should successfully resolve.
	 * Success or failure of one ___flavor should not affect others
	 */
	if (bpf_ksym_exists(bpf_task_acquire___two))
		err = 1;
	else if (bpf_ksym_exists(bpf_task_acquire___three))
		err = 2;

	return 0;
}
109
110SEC("tp_btf/task_newtask")
111int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
112{
113	if (!is_test_kfunc_task())
114		return 0;
115
116	return test_acquire_release(task);
117}
118
119SEC("tp_btf/task_newtask")
120int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
121{
122	if (!is_test_kfunc_task())
123		return 0;
124
125	return test_acquire_release(bpf_get_current_task_btf());
126}
127
128SEC("tp_btf/task_newtask")
129int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
130{
131	long status;
132
133	if (!is_test_kfunc_task())
134		return 0;
135
136	status = tasks_kfunc_map_insert(task);
137	if (status)
138		err = 1;
139
140	return 0;
141}
142
/* Move a task reference through a map kptr and a bpf_obj_new()-allocated
 * local kptr, then use rcu_users to check that dropping the local object
 * recursively released the reference stashed inside it.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr, *acquired;
	struct __tasks_kfunc_map_value *v, *local;
	int refcnt, refcnt_after_drop;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	/* Take ownership of the reference stored in the map value. */
	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr) {
		err = 3;
		return 0;
	}

	local = bpf_obj_new(typeof(*local));
	if (!local) {
		err = 4;
		bpf_task_release(kptr);
		return 0;
	}

	/* Move the reference into the fresh local object; the old value in a
	 * newly-allocated object must be NULL.
	 */
	kptr = bpf_kptr_xchg(&local->task, kptr);
	if (kptr) {
		err = 5;
		bpf_obj_drop(local);
		bpf_task_release(kptr);
		return 0;
	}

	/* Take the reference back out so we hold it directly again. */
	kptr = bpf_kptr_xchg(&local->task, NULL);
	if (!kptr) {
		err = 6;
		bpf_obj_drop(local);
		return 0;
	}

	/* Stash a copy into local kptr and check if it is released recursively */
	acquired = bpf_task_acquire(kptr);
	if (!acquired) {
		err = 7;
		bpf_obj_drop(local);
		bpf_task_release(kptr);
		return 0;
	}
	bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);

	acquired = bpf_kptr_xchg(&local->task, acquired);
	if (acquired) {
		err = 8;
		bpf_obj_drop(local);
		bpf_task_release(kptr);
		bpf_task_release(acquired);
		return 0;
	}

	/* Dropping local must also release the task reference stashed in it. */
	bpf_obj_drop(local);

	/* rcu_users should have gone down by exactly one vs. the snapshot. */
	bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
	if (refcnt != refcnt_after_drop + 1) {
		err = 9;
		bpf_task_release(kptr);
		return 0;
	}

	bpf_task_release(kptr);

	return 0;
}
226
/* Leave the task kptr in the map, then dereference it directly under an RCU
 * read lock and acquire/release a reference on it.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	/* RCU keeps the map-value kptr valid while we read and acquire it. */
	bpf_rcu_read_lock();
	kptr = v->task;
	if (!kptr) {
		err = 3;
	} else {
		kptr = bpf_task_acquire(kptr);
		if (!kptr)
			err = 4;
		else
			bpf_task_release(kptr);
	}
	bpf_rcu_read_unlock();

	return 0;
}
264
265SEC("tp_btf/task_newtask")
266int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
267{
268	struct task_struct *current, *acquired;
269
270	if (!is_test_kfunc_task())
271		return 0;
272
273	current = bpf_get_current_task_btf();
274	acquired = bpf_task_acquire(current);
275	if (acquired)
276		bpf_task_release(acquired);
277	else
278		err = 1;
279
280	return 0;
281}
282
283static void lookup_compare_pid(const struct task_struct *p)
284{
285	struct task_struct *acquired;
286
287	acquired = bpf_task_from_pid(p->pid);
288	if (!acquired) {
289		err = 1;
290		return;
291	}
292
293	if (acquired->pid != p->pid)
294		err = 2;
295	bpf_task_release(acquired);
296}
297
298SEC("tp_btf/task_newtask")
299int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
300{
301	if (!is_test_kfunc_task())
302		return 0;
303
304	lookup_compare_pid(task);
305	return 0;
306}
307
308SEC("tp_btf/task_newtask")
309int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
310{
311	if (!is_test_kfunc_task())
312		return 0;
313
314	lookup_compare_pid(bpf_get_current_task_btf());
315	return 0;
316}
317
318static int is_pid_lookup_valid(s32 pid)
319{
320	struct task_struct *acquired;
321
322	acquired = bpf_task_from_pid(pid);
323	if (acquired) {
324		bpf_task_release(acquired);
325		return 1;
326	}
327
328	return 0;
329}
330
/* Nonsense pids must not resolve.  Also pokes bpf_strncmp() at task->comm
 * with sizes up to and at the apparent end of the array (presumably 16
 * bytes — the 16 and &comm[8]+4 cases read to that boundary; verify against
 * TASK_COMM_LEN).
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	bpf_strncmp(task->comm, 12, "foo");
	bpf_strncmp(task->comm, 16, "foo");
	bpf_strncmp(&task->comm[8], 4, "foo");

	if (is_pid_lookup_valid(-1)) {
		err = 1;
		return 0;
	}

	if (is_pid_lookup_valid(0xcafef00d)) {
		err = 2;
		return 0;
	}

	return 0;
}
353
354SEC("tp_btf/task_newtask")
355int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
356{
357	struct task_struct *acquired;
358
359	/* task->group_leader is listed as a trusted, non-NULL field of task struct. */
360	acquired = bpf_task_acquire(task->group_leader);
361	if (acquired)
362		bpf_task_release(acquired);
363	else
364		err = 1;
365
366
367	return 0;
368}
369
370SEC("syscall")
371int test_task_from_vpid_current(const void *ctx)
372{
373	struct task_struct *current, *v_task;
374
375	v_task = bpf_task_from_vpid(1);
376	if (!v_task) {
377		err = 1;
378		return 0;
379	}
380
381	current = bpf_get_current_task_btf();
382
383	/* The current process should be the init process (pid 1) in the new pid namespace. */
384	if (current != v_task)
385		err = 2;
386
387	bpf_task_release(v_task);
388	return 0;
389}
390
/* In a fresh pid namespace where only the test process exists, any vpid
 * other than 1 must fail to resolve.
 */
SEC("syscall")
int test_task_from_vpid_invalid(const void *ctx)
{
	struct task_struct *v_task;

	v_task = bpf_task_from_vpid(-1);
	if (v_task) {
		err = 1;
		goto err;
	}

	/* There should be only one process (current process) in the new pid namespace. */
	v_task = bpf_task_from_vpid(2);
	if (v_task) {
		err = 2;
		goto err;
	}

	v_task = bpf_task_from_vpid(9999);
	if (v_task) {
		err = 3;
		goto err;
	}

	return 0;
err:
	/* Unexpectedly got a reference; drop it before exiting. */
	bpf_task_release(v_task);
	return 0;
}
v6.8 (older version of the same file, shown below)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
  3
  4#include <vmlinux.h>
  5#include <bpf/bpf_tracing.h>
  6#include <bpf/bpf_helpers.h>
  7
 
  8#include "task_kfunc_common.h"
  9
char _license[] SEC("license") = "GPL";

/* err reports test failures back to userspace; pid filters trace events to
 * the test-runner process.
 */
int err, pid;

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *         TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

/* Declared __weak so the object still loads on kernels lacking the kfunc;
 * existence is checked at runtime with bpf_ksym_exists().
 */
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;

/* ___one flavor matches the real single-param bpf_task_acquire signature. */
struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
/* The two-param bpf_task_acquire doesn't exist */
struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
/* Incorrect type for first param */
struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;

/* Deliberately nonexistent symbol; bpf_ksym_exists() must report false. */
void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
 30
 31static bool is_test_kfunc_task(void)
 32{
 33	int cur_pid = bpf_get_current_pid_tgid() >> 32;
 34
 35	return pid == cur_pid;
 36}
 37
/* Verify the weak ksyms resolved as expected, then acquire and immediately
 * release a reference on @task.  Failures are reported through err (3-6).
 */
static int test_acquire_release(struct task_struct *task)
{
	struct task_struct *acquired = NULL;

	if (!bpf_ksym_exists(bpf_task_acquire)) {
		err = 3;
		return 0;
	}
	if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
		err = 4;
		return 0;
	}
	if (bpf_ksym_exists(invalid_kfunc)) {
		/* the verifier's dead code elimination should remove this */
		err = 5;
		asm volatile ("goto -1"); /* for (;;); */
	}

	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 6;

	return 0;
}
 64
/* Exercise libbpf's ___flavor ksym resolution: only ___one matches the real
 * bpf_task_acquire signature, so only its branch should survive resolution.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired = NULL;
	int fake_ctx = 42;

	if (bpf_ksym_exists(bpf_task_acquire___one)) {
		acquired = bpf_task_acquire___one(task);
	} else if (bpf_ksym_exists(bpf_task_acquire___two)) {
		/* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
		 * call will find vmlinux's bpf_task_acquire, but subsequent
		 * bpf_core_types_are_compat will fail
		 */
		acquired = bpf_task_acquire___two(task, &fake_ctx);
		err = 3;
		return 0;
	} else if (bpf_ksym_exists(bpf_task_acquire___three)) {
		/* bpf_core_types_are_compat will fail similarly to above case */
		acquired = bpf_task_acquire___three(&fake_ctx);
		err = 4;
		return 0;
	}

	if (acquired)
		bpf_task_release(acquired);
	else
		err = 5;
	return 0;
}
 94
/* Both flavors mismatch the real kfunc; neither ksym may resolve. */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
{
	/* Neither symbol should successfully resolve.
	 * Success or failure of one ___flavor should not affect others
	 */
	if (bpf_ksym_exists(bpf_task_acquire___two))
		err = 1;
	else if (bpf_ksym_exists(bpf_task_acquire___three))
		err = 2;

	return 0;
}
108
109SEC("tp_btf/task_newtask")
110int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
111{
112	if (!is_test_kfunc_task())
113		return 0;
114
115	return test_acquire_release(task);
116}
117
118SEC("tp_btf/task_newtask")
119int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
120{
121	if (!is_test_kfunc_task())
122		return 0;
123
124	return test_acquire_release(bpf_get_current_task_btf());
125}
126
127SEC("tp_btf/task_newtask")
128int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
129{
130	long status;
131
132	if (!is_test_kfunc_task())
133		return 0;
134
135	status = tasks_kfunc_map_insert(task);
136	if (status)
137		err = 1;
138
139	return 0;
140}
141
/* v6.8 variant: insert @task into the map, xchg the kptr back out (taking
 * ownership of the stored reference), and release it.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	/* Take ownership of the reference stored in the map value. */
	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr) {
		err = 3;
		return 0;
	}

	bpf_task_release(kptr);

	return 0;
}
174
/* Leave the task kptr in the map, then dereference it directly under an RCU
 * read lock and acquire/release a reference on it.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	/* RCU keeps the map-value kptr valid while we read and acquire it. */
	bpf_rcu_read_lock();
	kptr = v->task;
	if (!kptr) {
		err = 3;
	} else {
		kptr = bpf_task_acquire(kptr);
		if (!kptr)
			err = 4;
		else
			bpf_task_release(kptr);
	}
	bpf_rcu_read_unlock();

	return 0;
}
212
213SEC("tp_btf/task_newtask")
214int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
215{
216	struct task_struct *current, *acquired;
217
218	if (!is_test_kfunc_task())
219		return 0;
220
221	current = bpf_get_current_task_btf();
222	acquired = bpf_task_acquire(current);
223	if (acquired)
224		bpf_task_release(acquired);
225	else
226		err = 1;
227
228	return 0;
229}
230
231static void lookup_compare_pid(const struct task_struct *p)
232{
233	struct task_struct *acquired;
234
235	acquired = bpf_task_from_pid(p->pid);
236	if (!acquired) {
237		err = 1;
238		return;
239	}
240
241	if (acquired->pid != p->pid)
242		err = 2;
243	bpf_task_release(acquired);
244}
245
246SEC("tp_btf/task_newtask")
247int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
248{
249	if (!is_test_kfunc_task())
250		return 0;
251
252	lookup_compare_pid(task);
253	return 0;
254}
255
256SEC("tp_btf/task_newtask")
257int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
258{
259	if (!is_test_kfunc_task())
260		return 0;
261
262	lookup_compare_pid(bpf_get_current_task_btf());
263	return 0;
264}
265
266static int is_pid_lookup_valid(s32 pid)
267{
268	struct task_struct *acquired;
269
270	acquired = bpf_task_from_pid(pid);
271	if (acquired) {
272		bpf_task_release(acquired);
273		return 1;
274	}
275
276	return 0;
277}
278
/* Nonsense pids must not resolve.  Also pokes bpf_strncmp() at task->comm
 * with sizes up to and at the apparent end of the array (presumably 16
 * bytes — the 16 and &comm[8]+4 cases read to that boundary; verify against
 * TASK_COMM_LEN).
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	bpf_strncmp(task->comm, 12, "foo");
	bpf_strncmp(task->comm, 16, "foo");
	bpf_strncmp(&task->comm[8], 4, "foo");

	if (is_pid_lookup_valid(-1)) {
		err = 1;
		return 0;
	}

	if (is_pid_lookup_valid(0xcafef00d)) {
		err = 2;
		return 0;
	}

	return 0;
}
301
302SEC("tp_btf/task_newtask")
303int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
304{
305	struct task_struct *acquired;
306
307	/* task->group_leader is listed as a trusted, non-NULL field of task struct. */
308	acquired = bpf_task_acquire(task->group_leader);
309	if (acquired)
310		bpf_task_release(acquired);
311	else
312		err = 1;
313
314
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
315	return 0;
316}