// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_misc.h"
#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *	   TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)
{
	int status;

	status = tasks_kfunc_map_insert(task);
	if (status)
		return NULL;

	return tasks_kfunc_map_value_lookup(task);
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	/* Can't invoke bpf_task_acquire() on an untrusted pointer. */
	acquired = bpf_task_acquire(v->task);
	if (!acquired)
		return 0;

	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("arg#0 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;

	/* Can't invoke bpf_task_acquire() on a random frame pointer. */
	acquired = bpf_task_acquire((struct task_struct *)&stack_task);
	if (!acquired)
		return 0;

	bpf_task_release(acquired);

	return 0;
}

SEC("kretprobe/free_task")
__failure __msg("calling kernel function bpf_task_acquire is not allowed")
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;
	bpf_task_release(acquired);

	return 0;
}

SEC("kretprobe/free_task")
__failure __msg("calling kernel function bpf_task_acquire is not allowed")
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	bpf_rcu_read_lock();
	if (!task) {
		bpf_rcu_read_unlock();
		return 0;
	}
	/* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	bpf_rcu_read_unlock();

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* Can't invoke bpf_task_acquire() on a NULL pointer. */
	acquired = bpf_task_acquire(NULL);
	if (!acquired)
		return 0;
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);

	/* Acquired task is never released. */
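	/* __sink() only marks the pointer as used so the compiler doesn't drop it. */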
	__sink(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr)
		return 0;

	/* Kptr retrieved from map is never released. */

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);
	/* Can't invoke bpf_task_release() on an acquired task without a NULL check. */
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
{
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	/* Can't invoke bpf_task_release() on an untrusted pointer. */
	bpf_task_release(v->task);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("arg#0 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired = (struct task_struct *)&clone_flags;

	/* Cannot release random frame pointer. */
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
{
	struct __tasks_kfunc_map_value local, *v;
	long status;
	struct task_struct *acquired, *old;
	s32 pid;

	status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);
	if (status)
		return 0;

	local.task = NULL;
	status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
	if (status)
		return status;

	v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
	if (!v)
		return -ENOENT;

	acquired = bpf_task_acquire(task);
	if (!acquired)
		return -EEXIST;

	old = bpf_kptr_xchg(&v->task, acquired);

	/* old cannot be passed to bpf_task_release() without a NULL check. */
	bpf_task_release(old);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("release kernel function bpf_task_release expects")
int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
{
	/* Cannot release trusted task pointer which was not acquired. */
	bpf_task_release(task);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(task->pid);

	/* Releasing bpf_task_from_pid() lookup without a NULL check. */
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_from_vpid_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_vpid(task->pid);

	/* Releasing bpf_task_from_vpid() lookup without a NULL check. */
	bpf_task_release(acquired);

	return 0;
}

SEC("lsm/task_free")
__failure __msg("R1 must be a rcu pointer")
int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
{
	struct task_struct *acquired;

	/* The argument of the lsm task_free hook is untrusted. */
	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	bpf_task_release(acquired);
	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("access beyond the end of member comm")
int BPF_PROG(task_access_comm1, struct task_struct *task, u64 clone_flags)
{
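	/* task->comm is TASK_COMM_LEN (16) bytes; comparing 17 bytes reads past the end of the member. */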
	bpf_strncmp(task->comm, 17, "foo");
	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("access beyond the end of member comm")
int BPF_PROG(task_access_comm2, struct task_struct *task, u64 clone_flags)
{
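	/* Starting one byte into comm, a 16-byte comparison still runs past the end of the member. */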
	bpf_strncmp(task->comm + 1, 16, "foo");
	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("write into memory")
int BPF_PROG(task_access_comm3, struct task_struct *task, u64 clone_flags)
{
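	/* Using task->comm as the destination is a write into read-only BTF pointer memory. */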
	bpf_probe_read_kernel(task->comm, 16, task->comm);
	return 0;
}

SEC("fentry/__set_task_comm")
__failure __msg("R1 type=ptr_ expected")
int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool exec)
{
	/*
	 * task->comm is a legacy ptr_to_btf_id. The verifier cannot guarantee
	 * its safety. Hence it cannot be accessed with normal load insns.
	 */
	bpf_strncmp(task->comm, 16, "foo");
	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("R1 must be referenced or trusted")
int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *local;
	struct __tasks_kfunc_map_value *v;

	if (tasks_kfunc_map_insert(task))
		return 0;

	v = tasks_kfunc_map_value_lookup(task);
	if (!v)
		return 0;

	bpf_rcu_read_lock();
	local = v->task;
	if (!local) {
		bpf_rcu_read_unlock();
		return 0;
	}
	/* Can't release a kptr that's still stored in a map. */
	bpf_task_release(local);
	bpf_rcu_read_unlock();

	return 0;
}