// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/btf_ids.h>

struct bpf_iter_seq_task_common {
	struct pid_namespace *ns;
};

struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callbacks.
	 */
	struct bpf_iter_seq_task_common common;
	u32 tid;
};

/* Find the next task in @ns whose namespace-local id is >= *tid, take a
 * reference on it, and update *tid to the id actually found.  With
 * @skip_if_dup_files, threads that share files_struct with their group
 * leader are skipped, so the task_file and task_vma iterators do not
 * visit the same fd table or mm more than once.
 */
static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
					     u32 *tid,
					     bool skip_if_dup_files)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	rcu_read_lock();
retry:
	pid = find_ge_pid(*tid, ns);
	if (pid) {
		*tid = pid_nr_ns(pid, ns);
		task = get_pid_task(pid, PIDTYPE_PID);
		if (!task) {
			++*tid;
			goto retry;
		} else if (skip_if_dup_files && !thread_group_leader(task) &&
			   task->files == task->group_leader->files) {
			put_task_struct(task);
			task = NULL;
			++*tid;
			goto retry;
		}
	}
	rcu_read_unlock();

	return task;
}

static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	task = task_seq_get_next(info->common.ns, &info->tid, false);
	if (!task)
		return NULL;

	if (*pos == 0)
		++*pos;
	return task;
}

static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	++*pos;
	++info->tid;
	put_task_struct((struct task_struct *)v);
	task = task_seq_get_next(info->common.ns, &info->tid, false);
	if (!task)
		return NULL;

	return task;
}

struct bpf_iter__task {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};

DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)

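/* For reference, a minimal sketch of a BPF program that can attach to
 * this iterator, modeled on the bpf_iter selftests.  It is not part of
 * this file and the program name is illustrative; ctx->task may be NULL
 * on the final in_stop invocation:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */
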
static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__task ctx;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = task;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_seq_show(struct seq_file *seq, void *v)
{
	return __task_seq_show(seq, v, false);
}

static void task_seq_stop(struct seq_file *seq, void *v)
{
	/* A NULL v means the iteration is complete; give the BPF program
	 * one final invocation with in_stop set so it can emit any
	 * end-of-sequence output.
	 */
	if (!v)
		(void)__task_seq_show(seq, v, true);
	else
		put_task_struct((struct task_struct *)v);
}

static const struct seq_operations task_seq_ops = {
	.start = task_seq_start,
	.next = task_seq_next,
	.stop = task_seq_stop,
	.show = task_seq_show,
};

struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callbacks.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	u32 tid;
	u32 fd;
};

static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{
	struct pid_namespace *ns = info->common.ns;
	u32 curr_tid = info->tid;
	struct task_struct *curr_task;
	unsigned int curr_fd = info->fd;

	/* If this function returns a non-NULL file object,
	 * it holds a reference to the task and the file.
	 * Otherwise, it does not hold any reference.
	 */
again:
	if (info->task) {
		curr_task = info->task;
		curr_fd = info->fd;
	} else {
		curr_task = task_seq_get_next(ns, &curr_tid, true);
		if (!curr_task) {
			info->task = NULL;
			info->tid = curr_tid;
			return NULL;
		}

		/* set info->task and info->tid */
		info->task = curr_task;
		if (curr_tid == info->tid) {
			curr_fd = info->fd;
		} else {
			info->tid = curr_tid;
			curr_fd = 0;
		}
	}

	rcu_read_lock();
	for (;; curr_fd++) {
		struct file *f;

		f = task_lookup_next_fd_rcu(curr_task, &curr_fd);
		if (!f)
			break;
		if (!get_file_rcu(f))
			continue;

		/* set info->fd */
		info->fd = curr_fd;
		rcu_read_unlock();
		return f;
	}

	/* the current task is done, go to the next task */
	rcu_read_unlock();
	put_task_struct(curr_task);
	info->task = NULL;
	info->fd = 0;
	curr_tid = ++(info->tid);
	goto again;
}

static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct file *file;

	info->task = NULL;
	file = task_file_seq_get_next(info);
	if (file && *pos == 0)
		++*pos;

	return file;
}

static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	++*pos;
	++info->fd;
	fput((struct file *)v);
	return task_file_seq_get_next(info);
}

struct bpf_iter__task_file {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	u32 fd __aligned(8);
	__bpf_md_ptr(struct file *, file);
};

DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)

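/* Similarly, a sketch of a task_file iterator program (names are
 * illustrative, following the bpf_iter selftests); ctx->task and
 * ctx->file may be NULL on the final in_stop invocation:
 *
 *	SEC("iter/task_file")
 *	int dump_task_file(struct bpf_iter__task_file *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		struct file *file = ctx->file;
 *
 *		if (task && file)
 *			BPF_SEQ_PRINTF(seq, "%8d %8d %lx\n", task->tgid,
 *				       ctx->fd, (long)file->f_op);
 *		return 0;
 *	}
 */
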
static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct bpf_iter__task_file ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.fd = info->fd;
	ctx.file = file;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_file_seq_show(struct seq_file *seq, void *v)
{
	return __task_file_seq_show(seq, v, false);
}

static void task_file_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	if (!v) {
		(void)__task_file_seq_show(seq, v, true);
	} else {
		fput((struct file *)v);
		put_task_struct(info->task);
		info->task = NULL;
	}
}

/* The iterator operates in the pid namespace that is active for the task
 * creating it; take a reference on that namespace for the lifetime of the
 * seq_file.
 */
static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	common->ns = get_pid_ns(task_active_pid_ns(current));
	return 0;
}

static void fini_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	put_pid_ns(common->ns);
}

static const struct seq_operations task_file_seq_ops = {
	.start = task_file_seq_start,
	.next = task_file_seq_next,
	.stop = task_file_seq_stop,
	.show = task_file_seq_show,
};

struct bpf_iter_seq_task_vma_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callbacks.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	struct vm_area_struct *vma;
	u32 tid;
	unsigned long prev_vm_start;
	unsigned long prev_vm_end;
};

enum bpf_task_vma_iter_find_op {
	task_vma_iter_first_vma,   /* use mm->mmap */
	task_vma_iter_next_vma,    /* use curr_vma->vm_next */
	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
};

static struct vm_area_struct *
task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
{
	struct pid_namespace *ns = info->common.ns;
	enum bpf_task_vma_iter_find_op op;
	struct vm_area_struct *curr_vma;
	struct task_struct *curr_task;
	u32 curr_tid = info->tid;

	/* If this function returns a non-NULL vma, it holds a reference to
	 * the task_struct, and holds the read lock on vma->mm->mmap_lock.
	 * If this function returns NULL, it does not hold any reference or
	 * lock.
	 */
	if (info->task) {
		curr_task = info->task;
		curr_vma = info->vma;
		/* In case of lock contention, drop mmap_lock to unblock
		 * the writer.
		 *
		 * After relock, call find_vma(mm, prev_vm_end - 1) to find
		 * the new vma to process.
		 *
		 * +------+------+-----------+
		 * | VMA1 | VMA2 | VMA3      |
		 * +------+------+-----------+
		 * |      |      |           |
		 * 4k     8k     16k         400k
		 *
		 * For example, curr_vma == VMA2. Before unlock, we set
		 *
		 *    prev_vm_start = 8k
		 *    prev_vm_end   = 16k
		 *
		 * There are a few cases:
		 *
		 * 1) VMA2 is freed, but VMA3 exists.
		 *
		 *    find_vma() will return VMA3, just process VMA3.
		 *
		 * 2) VMA2 still exists.
		 *
		 *    find_vma() will return VMA2, process VMA2->vm_next.
		 *
		 * 3) no more vma in this mm.
		 *
		 *    Process the next task.
		 *
		 * 4) find_vma() returns a different vma, VMA2'.
		 *
		 *    4.1) If VMA2 covers the same range as VMA2', skip VMA2',
		 *         because we already covered the range;
		 *    4.2) If VMA2 and VMA2' cover different ranges, process
		 *         VMA2'.
		 */
		if (mmap_lock_is_contended(curr_task->mm)) {
			info->prev_vm_start = curr_vma->vm_start;
			info->prev_vm_end = curr_vma->vm_end;
			op = task_vma_iter_find_vma;
			mmap_read_unlock(curr_task->mm);
			if (mmap_read_lock_killable(curr_task->mm))
				goto finish;
		} else {
			op = task_vma_iter_next_vma;
		}
	} else {
again:
		curr_task = task_seq_get_next(ns, &curr_tid, true);
		if (!curr_task) {
			info->tid = curr_tid + 1;
			goto finish;
		}

		if (curr_tid != info->tid) {
			info->tid = curr_tid;
			/* new task, process the first vma */
			op = task_vma_iter_first_vma;
		} else {
			/* Found the same tid, which means user space has
			 * consumed the data in the previous buffer and is
			 * reading more.  We dropped mmap_lock before
			 * returning to user space, so it is necessary to
			 * use find_vma() to find the next vma to process.
			 */
			op = task_vma_iter_find_vma;
		}

		if (!curr_task->mm)
			goto next_task;

		if (mmap_read_lock_killable(curr_task->mm))
			goto finish;
	}

	switch (op) {
	case task_vma_iter_first_vma:
		curr_vma = curr_task->mm->mmap;
		break;
	case task_vma_iter_next_vma:
		curr_vma = curr_vma->vm_next;
		break;
	case task_vma_iter_find_vma:
		/* We dropped mmap_lock so it is necessary to use find_vma()
		 * to find the next vma. This is similar to the mechanism
		 * in show_smaps_rollup().
		 */
		curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
		/* case 1) and 4.2) above just use curr_vma */

		/* check for case 2) or case 4.1) above */
		if (curr_vma &&
		    curr_vma->vm_start == info->prev_vm_start &&
		    curr_vma->vm_end == info->prev_vm_end)
			curr_vma = curr_vma->vm_next;
		break;
	}
	if (!curr_vma) {
		/* case 3) above, or case 2)/4.1) with vm_next == NULL */
		mmap_read_unlock(curr_task->mm);
		goto next_task;
	}
	info->task = curr_task;
	info->vma = curr_vma;
	return curr_vma;

next_task:
	put_task_struct(curr_task);
	info->task = NULL;
	curr_tid++;
	goto again;

finish:
	if (curr_task)
		put_task_struct(curr_task);
	info->task = NULL;
	info->vma = NULL;
	return NULL;
}

static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct vm_area_struct *vma;

	vma = task_vma_seq_get_next(info);
	if (vma && *pos == 0)
		++*pos;

	return vma;
}

static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	++*pos;
	return task_vma_seq_get_next(info);
}

struct bpf_iter__task_vma {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	__bpf_md_ptr(struct vm_area_struct *, vma);
};

DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
		     struct task_struct *task, struct vm_area_struct *vma)

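/* A sketch of a task_vma iterator program in the same spirit (names are
 * illustrative); the program runs with mmap_lock held for read, so the
 * vma fields can be read safely:
 *
 *	SEC("iter/task_vma")
 *	int dump_task_vma(struct bpf_iter__task_vma *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		struct vm_area_struct *vma = ctx->vma;
 *
 *		if (task && vma)
 *			BPF_SEQ_PRINTF(seq, "%8d %lx-%lx\n", task->pid,
 *				       vma->vm_start, vma->vm_end);
 *		return 0;
 *	}
 */
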
static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct bpf_iter__task_vma ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.vma = info->vma;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_vma_seq_show(struct seq_file *seq, void *v)
{
	return __task_vma_seq_show(seq, false);
}

static void task_vma_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	if (!v) {
		(void)__task_vma_seq_show(seq, true);
	} else {
		/* info->vma has not been seen by the BPF program.  If user
		 * space reads more, task_vma_seq_get_next should return
		 * this vma again.  Set prev_vm_start to ~0UL so that we
		 * don't skip the vma returned by the next find_vma() (case
		 * task_vma_iter_find_vma in task_vma_seq_get_next()).
		 */
		info->prev_vm_start = ~0UL;
		info->prev_vm_end = info->vma->vm_end;
		mmap_read_unlock(info->task->mm);
		put_task_struct(info->task);
		info->task = NULL;
	}
}

static const struct seq_operations task_vma_seq_ops = {
	.start = task_vma_seq_start,
	.next = task_vma_seq_next,
	.stop = task_vma_seq_stop,
	.show = task_vma_seq_show,
};

BTF_ID_LIST(btf_task_file_ids)
BTF_ID(struct, task_struct)
BTF_ID(struct, file)
BTF_ID(struct, vm_area_struct)

static const struct bpf_iter_seq_info task_seq_info = {
	.seq_ops = &task_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_info),
};

static struct bpf_iter_reg task_reg_info = {
	.target = "task",
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task, task),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_seq_info,
};

static const struct bpf_iter_seq_info task_file_seq_info = {
	.seq_ops = &task_file_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info),
};

static struct bpf_iter_reg task_file_reg_info = {
	.target = "task_file",
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task_file, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_file, file),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_file_seq_info,
};

static const struct bpf_iter_seq_info task_vma_seq_info = {
	.seq_ops = &task_vma_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_vma_info),
};

static struct bpf_iter_reg task_vma_reg_info = {
	.target = "task_vma",
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task_vma, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_vma, vma),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_vma_seq_info,
};

static int __init task_iter_init(void)
{
	int ret;

	/* Resolve the BTF type ids recorded in btf_task_file_ids and wire
	 * them into the context argument descriptions before registering
	 * each iterator target.
	 */
	task_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
	ret = bpf_iter_reg_target(&task_reg_info);
	if (ret)
		return ret;

	task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
	task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
	ret = bpf_iter_reg_target(&task_file_reg_info);
	if (ret)
		return ret;

	task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[2];
	return bpf_iter_reg_target(&task_vma_reg_info);
}
late_initcall(task_iter_init);
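
/* From user space, an iterator over any of these targets is typically
 * created and read like the following libbpf-based sketch (error
 * handling omitted; "prog" stands for a loaded iter program such as the
 * examples above):
 *
 *	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
 *	int iter_fd = bpf_iter_create(bpf_link__fd(link));
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *	close(iter_fd);
 *	bpf_link__destroy(link);
 */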