// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "../bpf_experimental.h"
#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";
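
/* pid is set by the userspace test runner to the test process's PID so that
 * the programs below only react to events from the test itself; err carries
 * any failure code back to the runner.
 */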
int err, pid;

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *	TP_PROTO(struct task_struct *p, u64 clone_flags)
 */
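
/* Declaring kfuncs as __weak __ksym allows the program to load even when a
 * symbol is absent from the running kernel; availability is then checked at
 * runtime with bpf_ksym_exists().
 */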
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;

struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
/* The two-param bpf_task_acquire doesn't exist */
struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
/* Incorrect type for first param */
struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;
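
/* invalid_kfunc intentionally exists nowhere, so bpf_ksym_exists() must
 * evaluate to false for it and any code guarded by it should be eliminated
 * as dead by the verifier.
 */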
void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;

static bool is_test_kfunc_task(void)
{
	int cur_pid = bpf_get_current_pid_tgid() >> 32;

	return pid == cur_pid;
}
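
/* Acquire and release a reference on @task, recording any failure in err. */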
static int test_acquire_release(struct task_struct *task)
{
	struct task_struct *acquired = NULL;

	if (!bpf_ksym_exists(bpf_task_acquire)) {
		err = 3;
		return 0;
	}
	if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
		err = 4;
		return 0;
	}
	if (bpf_ksym_exists(invalid_kfunc)) {
		/* the verifier's dead code elimination should remove this */
		err = 5;
		asm volatile ("goto -1"); /* for (;;); */
	}

	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 6;

	return 0;
}
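
/* libbpf strips a ___suffix from a symbol's name before resolving it, so each
 * bpf_task_acquire___* flavor below is matched against vmlinux's
 * bpf_task_acquire; only the flavor whose signature is BTF-compatible with
 * the real kfunc resolves successfully.
 */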
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired = NULL;
	int fake_ctx = 42;

	if (bpf_ksym_exists(bpf_task_acquire___one)) {
		acquired = bpf_task_acquire___one(task);
	} else if (bpf_ksym_exists(bpf_task_acquire___two)) {
		/* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
		 * call will find vmlinux's bpf_task_acquire, but the subsequent
		 * bpf_core_types_are_compat check will fail.
		 */
		acquired = bpf_task_acquire___two(task, &fake_ctx);
		err = 3;
		return 0;
	} else if (bpf_ksym_exists(bpf_task_acquire___three)) {
		/* bpf_core_types_are_compat will fail similarly to the case above */
		acquired = bpf_task_acquire___three(&fake_ctx);
		err = 4;
		return 0;
	}

	if (acquired)
		bpf_task_release(acquired);
	else
		err = 5;
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
{
	/* Neither symbol should successfully resolve. Success or failure of one
	 * ___flavor should not affect the others.
	 */
	if (bpf_ksym_exists(bpf_task_acquire___two))
		err = 1;
	else if (bpf_ksym_exists(bpf_task_acquire___three))
		err = 2;

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(task);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(bpf_get_current_task_btf());
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
{
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status)
		err = 1;

	return 0;
}
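
/* Stash @task in the map, exchange it back out, move it into a locally
 * allocated object, and verify that dropping that object releases the
 * stashed task's reference recursively.
 */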
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr, *acquired;
	struct __tasks_kfunc_map_value *v, *local;
	int refcnt, refcnt_after_drop;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}
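
	/* Take the stashed task back out of the map; on success we now own the
	 * reference that was held by the map.
	 */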
	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr) {
		err = 3;
		return 0;
	}

	local = bpf_obj_new(typeof(*local));
	if (!local) {
		err = 4;
		bpf_task_release(kptr);
		return 0;
	}

	kptr = bpf_kptr_xchg(&local->task, kptr);
	if (kptr) {
		err = 5;
		bpf_obj_drop(local);
		bpf_task_release(kptr);
		return 0;
	}

	kptr = bpf_kptr_xchg(&local->task, NULL);
	if (!kptr) {
		err = 6;
		bpf_obj_drop(local);
		return 0;
	}

	/* Stash an extra reference in the local kptr and check that it is
	 * released recursively when the object is dropped.
	 */
	acquired = bpf_task_acquire(kptr);
	if (!acquired) {
		err = 7;
		bpf_obj_drop(local);
		bpf_task_release(kptr);
		return 0;
	}
	bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);

	acquired = bpf_kptr_xchg(&local->task, acquired);
	if (acquired) {
		err = 8;
		bpf_obj_drop(local);
		bpf_task_release(kptr);
		bpf_task_release(acquired);
		return 0;
	}

	bpf_obj_drop(local);

	bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
	if (refcnt != refcnt_after_drop + 1) {
		err = 9;
		bpf_task_release(kptr);
		return 0;
	}

	bpf_task_release(kptr);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}
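
	/* A kptr read directly from a map value (rather than xchg'd out) is
	 * RCU-protected: it may only be dereferenced under the RCU read lock,
	 * and must be acquired before being used as a trusted reference.
	 */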
	bpf_rcu_read_lock();
	kptr = v->task;
	if (!kptr) {
		err = 3;
	} else {
		kptr = bpf_task_acquire(kptr);
		if (!kptr)
			err = 4;
		else
			bpf_task_release(kptr);
	}
	bpf_rcu_read_unlock();

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *current, *acquired;

	if (!is_test_kfunc_task())
		return 0;

	current = bpf_get_current_task_btf();
	acquired = bpf_task_acquire(current);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 1;

	return 0;
}
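
/* Look up @p's task by PID with bpf_task_from_pid() and verify that the
 * returned task refers to the same PID.
 */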
static void lookup_compare_pid(const struct task_struct *p)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(p->pid);
	if (!acquired) {
		err = 1;
		return;
	}

	if (acquired->pid != p->pid)
		err = 2;
	bpf_task_release(acquired);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(task);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(bpf_get_current_task_btf());
	return 0;
}
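
/* Return 1 if bpf_task_from_pid() finds a task for @pid, 0 otherwise. */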
static int is_pid_lookup_valid(s32 pid)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(pid);
	if (acquired) {
		bpf_task_release(acquired);
		return 1;
	}

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;
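
	/* Exercise bpf_strncmp() on the trusted task->comm field; all three
	 * calls stay within comm's TASK_COMM_LEN (16) byte bounds.
	 */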
	bpf_strncmp(task->comm, 12, "foo");
	bpf_strncmp(task->comm, 16, "foo");
	bpf_strncmp(&task->comm[8], 4, "foo");

	if (is_pid_lookup_valid(-1)) {
		err = 1;
		return 0;
	}

	if (is_pid_lookup_valid(0xcafef00d)) {
		err = 2;
		return 0;
	}

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* task->group_leader is listed as a trusted, non-NULL field of task_struct. */
	acquired = bpf_task_acquire(task->group_leader);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 1;

	return 0;
}
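
/* The two syscall programs below are expected to be invoked by the test
 * runner from inside a newly created pid namespace, in which the test
 * process is init (vpid 1).
 */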
SEC("syscall")
int test_task_from_vpid_current(const void *ctx)
{
	struct task_struct *current, *v_task;

	v_task = bpf_task_from_vpid(1);
	if (!v_task) {
		err = 1;
		return 0;
	}

	current = bpf_get_current_task_btf();

	/* The current process should be the init process (pid 1) in the new pid namespace. */
	if (current != v_task)
		err = 2;

	bpf_task_release(v_task);
	return 0;
}

SEC("syscall")
int test_task_from_vpid_invalid(const void *ctx)
{
	struct task_struct *v_task;

	v_task = bpf_task_from_vpid(-1);
	if (v_task) {
		err = 1;
		goto err;
	}

	/* There should be only one process (the current process) in the new pid namespace. */
	v_task = bpf_task_from_vpid(2);
	if (v_task) {
		err = 2;
		goto err;
	}

	v_task = bpf_task_from_vpid(9999);
	if (v_task) {
		err = 3;
		goto err;
	}

	return 0;
err:
	bpf_task_release(v_task);
	return 0;
}