// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";

int err, pid;

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *	TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

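/* bpf_task_acquire() and the ___flavor variants below are declared __weak so
 * that the object still loads when a symbol is absent from the kernel's BTF;
 * the programs then gate each call with bpf_ksym_exists(). libbpf ignores the
 * ___suffix when matching a ksym name against BTF, so every ___flavor is
 * matched against the same vmlinux bpf_task_acquire().
 */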
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;

struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
/* The two-param bpf_task_acquire doesn't exist */
struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
/* Incorrect type for first param */
struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;

void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;

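/* Only run a test's body for the task spawned by the userspace test runner,
 * which writes its own PID into the 'pid' global before attaching.
 */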
static bool is_test_kfunc_task(void)
{
	int cur_pid = bpf_get_current_pid_tgid() >> 32;

	return pid == cur_pid;
}

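/* Shared acquire/release path: verify that the required kfuncs resolved, then
 * take and drop a reference on @task.
 */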
static int test_acquire_release(struct task_struct *task)
{
	struct task_struct *acquired = NULL;

	if (!bpf_ksym_exists(bpf_task_acquire)) {
		err = 3;
		return 0;
	}
	if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
		err = 4;
		return 0;
	}
	if (bpf_ksym_exists(invalid_kfunc)) {
		/* the verifier's dead code elimination should remove this */
		err = 5;
		asm volatile ("goto -1"); /* for (;;); */
	}

	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 6;

	return 0;
}

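/* Check that libbpf's ___flavor relocation selects exactly the variant whose
 * signature is BTF-compatible with the kernel's bpf_task_acquire(): ___one
 * must resolve, while ___two and ___three must fail to.
 */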
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired = NULL;
	int fake_ctx = 42;

	if (bpf_ksym_exists(bpf_task_acquire___one)) {
		acquired = bpf_task_acquire___one(task);
	} else if (bpf_ksym_exists(bpf_task_acquire___two)) {
		/* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
		 * call will find vmlinux's bpf_task_acquire, but the subsequent
		 * bpf_core_types_are_compat() check will fail.
		 */
		acquired = bpf_task_acquire___two(task, &fake_ctx);
		err = 3;
		return 0;
	} else if (bpf_ksym_exists(bpf_task_acquire___three)) {
		/* bpf_core_types_are_compat() will fail similarly to the above case */
		acquired = bpf_task_acquire___three(&fake_ctx);
		err = 4;
		return 0;
	}

	if (acquired)
		bpf_task_release(acquired);
	else
		err = 5;
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
{
	/* Neither symbol should successfully resolve.
	 * Success or failure of one ___flavor should not affect others.
	 */
	if (bpf_ksym_exists(bpf_task_acquire___two))
		err = 1;
	else if (bpf_ksym_exists(bpf_task_acquire___three))
		err = 2;

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(task);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(bpf_get_current_task_btf());
}

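/* Store an acquired task kptr in the map and intentionally leave it there:
 * the reference is released when the map entry (or the map itself) is
 * destroyed.
 */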
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
{
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status)
		err = 1;

	return 0;
}

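/* bpf_kptr_xchg() atomically swaps the kptr out of the map value and hands
 * ownership of the reference back to the program, which must then release it.
 */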
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr) {
		err = 3;
		return 0;
	}

	bpf_task_release(kptr);

	return 0;
}

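/* Loading the kptr directly from the map value (rather than xchg-ing it out)
 * yields an RCU-protected pointer, so the load and the bpf_task_acquire()
 * call must happen inside a bpf_rcu_read_lock() critical section, and the
 * acquire may fail if the task's refcount has already dropped to zero.
 */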
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	bpf_rcu_read_lock();
	kptr = v->task;
	if (!kptr) {
		err = 3;
	} else {
		kptr = bpf_task_acquire(kptr);
		if (!kptr)
			err = 4;
		else
			bpf_task_release(kptr);
	}
	bpf_rcu_read_unlock();

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *current, *acquired;

	if (!is_test_kfunc_task())
		return 0;

	current = bpf_get_current_task_btf();
	acquired = bpf_task_acquire(current);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 1;

	return 0;
}

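/* bpf_task_from_pid() looks the task up in the init PID namespace and returns
 * an acquired reference (or NULL), so a successful lookup must be paired with
 * bpf_task_release().
 */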
static void lookup_compare_pid(const struct task_struct *p)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(p->pid);
	if (!acquired) {
		err = 1;
		return;
	}

	if (acquired->pid != p->pid)
		err = 2;
	bpf_task_release(acquired);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(task);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(bpf_get_current_task_btf());
	return 0;
}

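/* Return 1 if @pid resolves to a task, releasing the reference taken by the
 * lookup before returning.
 */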
static int is_pid_lookup_valid(s32 pid)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(pid);
	if (acquired) {
		bpf_task_release(acquired);
		return 1;
	}

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

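	/* Exercise bpf_strncmp() against the fixed-size task->comm buffer
	 * (TASK_COMM_LEN is 16 bytes), including a read at a non-zero offset.
	 */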
	bpf_strncmp(task->comm, 12, "foo");
	bpf_strncmp(task->comm, 16, "foo");
	bpf_strncmp(&task->comm[8], 4, "foo");

	if (is_pid_lookup_valid(-1)) {
		err = 1;
		return 0;
	}

	if (is_pid_lookup_valid(0xcafef00d)) {
		err = 2;
		return 0;
	}

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* task->group_leader is listed as a trusted, non-NULL field of task struct. */
	acquired = bpf_task_acquire(task->group_leader);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 1;

	return 0;
}