1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 */
5#include <linux/kernel.h>
6#include <linux/types.h>
7#include <linux/slab.h>
8#include <linux/bpf.h>
9#include <linux/bpf_verifier.h>
10#include <linux/bpf_perf_event.h>
11#include <linux/btf.h>
12#include <linux/filter.h>
13#include <linux/uaccess.h>
14#include <linux/ctype.h>
15#include <linux/kprobes.h>
16#include <linux/spinlock.h>
17#include <linux/syscalls.h>
18#include <linux/error-injection.h>
19#include <linux/btf_ids.h>
20#include <linux/bpf_lsm.h>
21#include <linux/fprobe.h>
22#include <linux/bsearch.h>
23#include <linux/sort.h>
24#include <linux/key.h>
25#include <linux/verification.h>
26#include <linux/namei.h>
27
28#include <net/bpf_sk_storage.h>
29
30#include <uapi/linux/bpf.h>
31#include <uapi/linux/btf.h>
32
33#include <asm/tlb.h>
34
35#include "trace_probe.h"
36#include "trace.h"
37
38#define CREATE_TRACE_POINTS
39#include "bpf_trace.h"
40
41#define bpf_event_rcu_dereference(p) \
42 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
43
44#define MAX_UPROBE_MULTI_CNT (1U << 20)
45#define MAX_KPROBE_MULTI_CNT (1U << 20)
46
47#ifdef CONFIG_MODULES
48struct bpf_trace_module {
49 struct module *module;
50 struct list_head list;
51};
52
53static LIST_HEAD(bpf_trace_modules);
54static DEFINE_MUTEX(bpf_module_mutex);
55
56static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
57{
58 struct bpf_raw_event_map *btp, *ret = NULL;
59 struct bpf_trace_module *btm;
60 unsigned int i;
61
62 mutex_lock(&bpf_module_mutex);
63 list_for_each_entry(btm, &bpf_trace_modules, list) {
64 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
65 btp = &btm->module->bpf_raw_events[i];
66 if (!strcmp(btp->tp->name, name)) {
67 if (try_module_get(btm->module))
68 ret = btp;
69 goto out;
70 }
71 }
72 }
73out:
74 mutex_unlock(&bpf_module_mutex);
75 return ret;
76}
77#else
78static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
79{
80 return NULL;
81}
82#endif /* CONFIG_MODULES */
83
84u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
85u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
86
87static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
88 u64 flags, const struct btf **btf,
89 s32 *btf_id);
90static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
91static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
92
93static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
94static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
95
96/**
97 * trace_call_bpf - invoke BPF program
98 * @call: tracepoint event
99 * @ctx: opaque context pointer
100 *
101 * kprobe handlers execute BPF programs via this helper.
102 * Can be used from static tracepoints in the future.
103 *
104 * Return: BPF programs always return an integer which is interpreted by
105 * kprobe handler as:
106 * 0 - return from kprobe (event is filtered out)
107 * 1 - store kprobe event into ring buffer
108 * Other values are reserved and currently alias to 1
109 */
110unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
111{
112 unsigned int ret;
113
114 cant_sleep();
115
116 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
117 /*
118 * since some bpf program is already running on this cpu,
119 * don't call into another bpf program (same or different)
120 * and don't send kprobe event into ring-buffer,
121 * so return zero here
122 */
123 rcu_read_lock();
124 bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
125 rcu_read_unlock();
126 ret = 0;
127 goto out;
128 }
129
130 /*
131 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
132 * to all call sites, we did a bpf_prog_array_valid() there to check
133 * whether call->prog_array is empty or not, which is
134 * a heuristic to speed up execution.
135 *
136	 * If the prog_array fetched by bpf_prog_array_valid() was
137	 * non-NULL, we go into trace_call_bpf() and do the actual
138	 * proper rcu_dereference() under the RCU lock.
139	 * If it turns out that prog_array is NULL, we bail out.
140	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
141	 * was NULL, the prog_array is skipped, with the accepted risk
142	 * of missing events if it was updated between that check and
143	 * the rcu_dereference().
144 */
145 rcu_read_lock();
146 ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
147 ctx, bpf_prog_run);
148 rcu_read_unlock();
149
150 out:
151 __this_cpu_dec(bpf_prog_active);
152
153 return ret;
154}
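/*
 * For reference, a minimal sketch (libbpf conventions; the attach point and
 * PID below are hypothetical) of a kprobe program whose return value is
 * interpreted as documented for trace_call_bpf() above: returning 0 filters
 * the event out, returning 1 stores it in the ring buffer.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int filter_openat(struct pt_regs *ctx)
 *	{
 *		// Record the event only for PID 1234; everything else is
 *		// filtered out by returning 0.
 *		return (bpf_get_current_pid_tgid() >> 32) == 1234;
 *	}
 */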
155
156#ifdef CONFIG_BPF_KPROBE_OVERRIDE
157BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
158{
159 regs_set_return_value(regs, rc);
160 override_function_with_return(regs);
161 return 0;
162}
163
164static const struct bpf_func_proto bpf_override_return_proto = {
165 .func = bpf_override_return,
166 .gpl_only = true,
167 .ret_type = RET_INTEGER,
168 .arg1_type = ARG_PTR_TO_CTX,
169 .arg2_type = ARG_ANYTHING,
170};
171#endif
172
173static __always_inline int
174bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
175{
176 int ret;
177
178 ret = copy_from_user_nofault(dst, unsafe_ptr, size);
179 if (unlikely(ret < 0))
180 memset(dst, 0, size);
181 return ret;
182}
183
184BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
185 const void __user *, unsafe_ptr)
186{
187 return bpf_probe_read_user_common(dst, size, unsafe_ptr);
188}
189
190const struct bpf_func_proto bpf_probe_read_user_proto = {
191 .func = bpf_probe_read_user,
192 .gpl_only = true,
193 .ret_type = RET_INTEGER,
194 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
195 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
196 .arg3_type = ARG_ANYTHING,
197};
198
199static __always_inline int
200bpf_probe_read_user_str_common(void *dst, u32 size,
201 const void __user *unsafe_ptr)
202{
203 int ret;
204
205 /*
206 * NB: We rely on strncpy_from_user() not copying junk past the NUL
207 * terminator into `dst`.
208 *
209 * strncpy_from_user() does long-sized strides in the fast path. If the
210 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
211 * then there could be junk after the NUL in `dst`. If user takes `dst`
212 * and keys a hash map with it, then semantically identical strings can
213 * occupy multiple entries in the map.
214 */
215 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
216 if (unlikely(ret < 0))
217 memset(dst, 0, size);
218 return ret;
219}
220
221BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
222 const void __user *, unsafe_ptr)
223{
224 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
225}
226
227const struct bpf_func_proto bpf_probe_read_user_str_proto = {
228 .func = bpf_probe_read_user_str,
229 .gpl_only = true,
230 .ret_type = RET_INTEGER,
231 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
232 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
233 .arg3_type = ARG_ANYTHING,
234};
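/*
 * Illustrative sketch (libbpf conventions; the attach point, map layout and
 * PT_REGS_PARM2() from <bpf/bpf_tracing.h> are assumptions): because
 * bpf_probe_read_user_str() does not copy junk past the NUL terminator, the
 * destination buffer can be used directly as a hash map key without
 * semantically identical strings occupying multiple entries.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, char[64]);
 *		__type(value, __u64);
 *	} names SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int count_names(struct pt_regs *ctx)
 *	{
 *		const char *uptr = (const char *)PT_REGS_PARM2(ctx);
 *		char key[64] = {};
 *		__u64 one = 1, *cnt;
 *
 *		if (bpf_probe_read_user_str(key, sizeof(key), uptr) < 0)
 *			return 0;
 *		cnt = bpf_map_lookup_elem(&names, key);
 *		if (cnt)
 *			__sync_fetch_and_add(cnt, 1);
 *		else
 *			bpf_map_update_elem(&names, key, &one, BPF_ANY);
 *		return 0;
 *	}
 */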
235
236BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
237 const void *, unsafe_ptr)
238{
239 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
240}
241
242const struct bpf_func_proto bpf_probe_read_kernel_proto = {
243 .func = bpf_probe_read_kernel,
244 .gpl_only = true,
245 .ret_type = RET_INTEGER,
246 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
247 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
248 .arg3_type = ARG_ANYTHING,
249};
250
251static __always_inline int
252bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
253{
254 int ret;
255
256 /*
257 * The strncpy_from_kernel_nofault() call will likely not fill the
258 * entire buffer, but that's okay in this circumstance as we're probing
259 * arbitrary memory anyway similar to bpf_probe_read_*() and might
260 * as well probe the stack. Thus, memory is explicitly cleared
261 * only in error case, so that improper users ignoring return
262 * code altogether don't copy garbage; otherwise length of string
263 * is returned that can be used for bpf_perf_event_output() et al.
264 */
265 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
266 if (unlikely(ret < 0))
267 memset(dst, 0, size);
268 return ret;
269}
270
271BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
272 const void *, unsafe_ptr)
273{
274 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
275}
276
277const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
278 .func = bpf_probe_read_kernel_str,
279 .gpl_only = true,
280 .ret_type = RET_INTEGER,
281 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
282 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
283 .arg3_type = ARG_ANYTHING,
284};
285
286#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
287BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
288 const void *, unsafe_ptr)
289{
290 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
291 return bpf_probe_read_user_common(dst, size,
292 (__force void __user *)unsafe_ptr);
293 }
294 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
295}
296
297static const struct bpf_func_proto bpf_probe_read_compat_proto = {
298 .func = bpf_probe_read_compat,
299 .gpl_only = true,
300 .ret_type = RET_INTEGER,
301 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
302 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
303 .arg3_type = ARG_ANYTHING,
304};
305
306BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
307 const void *, unsafe_ptr)
308{
309 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
310 return bpf_probe_read_user_str_common(dst, size,
311 (__force void __user *)unsafe_ptr);
312 }
313 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
314}
315
316static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
317 .func = bpf_probe_read_compat_str,
318 .gpl_only = true,
319 .ret_type = RET_INTEGER,
320 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
321 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
322 .arg3_type = ARG_ANYTHING,
323};
324#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
325
326BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
327 u32, size)
328{
329 /*
330 * Ensure we're in user context which is safe for the helper to
331 * run. This helper has no business in a kthread.
332 *
333 * access_ok() should prevent writing to non-user memory, but in
334 * some situations (nommu, temporary switch, etc) access_ok() does
335 * not provide enough validation, hence the check on KERNEL_DS.
336 *
337 * nmi_uaccess_okay() ensures the probe is not run in an interim
338 * state, when the task or mm are switched. This is specifically
339 * required to prevent the use of temporary mm.
340 */
341
342 if (unlikely(in_interrupt() ||
343 current->flags & (PF_KTHREAD | PF_EXITING)))
344 return -EPERM;
345 if (unlikely(!nmi_uaccess_okay()))
346 return -EPERM;
347
348 return copy_to_user_nofault(unsafe_ptr, src, size);
349}
350
351static const struct bpf_func_proto bpf_probe_write_user_proto = {
352 .func = bpf_probe_write_user,
353 .gpl_only = true,
354 .ret_type = RET_INTEGER,
355 .arg1_type = ARG_ANYTHING,
356 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
357 .arg3_type = ARG_CONST_SIZE,
358};
359
360static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
361{
362 if (!capable(CAP_SYS_ADMIN))
363 return NULL;
364
365 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
366 current->comm, task_pid_nr(current));
367
368 return &bpf_probe_write_user_proto;
369}
370
371#define MAX_TRACE_PRINTK_VARARGS 3
372#define BPF_TRACE_PRINTK_SIZE 1024
373
374BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
375 u64, arg2, u64, arg3)
376{
377 u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
378 struct bpf_bprintf_data data = {
379 .get_bin_args = true,
380 .get_buf = true,
381 };
382 int ret;
383
384 ret = bpf_bprintf_prepare(fmt, fmt_size, args,
385 MAX_TRACE_PRINTK_VARARGS, &data);
386 if (ret < 0)
387 return ret;
388
389 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
390
391 trace_bpf_trace_printk(data.buf);
392
393 bpf_bprintf_cleanup(&data);
394
395 return ret;
396}
397
398static const struct bpf_func_proto bpf_trace_printk_proto = {
399 .func = bpf_trace_printk,
400 .gpl_only = true,
401 .ret_type = RET_INTEGER,
402 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
403 .arg2_type = ARG_CONST_SIZE,
404};
405
406static void __set_printk_clr_event(void)
407{
408 /*
409 * This program might be calling bpf_trace_printk,
410 * so enable the associated bpf_trace/bpf_trace_printk event.
411	 * Repeat this each time, as it is possible a user has
412	 * disabled bpf_trace_printk events. By loading a program
413	 * that calls bpf_trace_printk(), however, the user has
414	 * expressed the intent to see such events.
415 */
416 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
417 pr_warn_ratelimited("could not enable bpf_trace_printk events");
418}
419
420const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
421{
422 __set_printk_clr_event();
423 return &bpf_trace_printk_proto;
424}
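/*
 * Usage sketch (libbpf conventions; the tracepoint is only an example):
 * bpf_trace_printk() takes a format string plus up to three u64 arguments,
 * and its output is emitted through the bpf_trace/bpf_trace_printk event
 * enabled by __set_printk_clr_event() above, typically readable from
 * /sys/kernel/tracing/trace_pipe.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int trace_exec(void *ctx)
 *	{
 *		const char fmt[] = "execve by pid %d\n";
 *
 *		bpf_trace_printk(fmt, sizeof(fmt),
 *				 bpf_get_current_pid_tgid() >> 32);
 *		return 0;
 *	}
 */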
425
426BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
427 u32, data_len)
428{
429 struct bpf_bprintf_data data = {
430 .get_bin_args = true,
431 .get_buf = true,
432 };
433 int ret, num_args;
434
435 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
436 (data_len && !args))
437 return -EINVAL;
438 num_args = data_len / 8;
439
440 ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
441 if (ret < 0)
442 return ret;
443
444 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
445
446 trace_bpf_trace_printk(data.buf);
447
448 bpf_bprintf_cleanup(&data);
449
450 return ret;
451}
452
453static const struct bpf_func_proto bpf_trace_vprintk_proto = {
454 .func = bpf_trace_vprintk,
455 .gpl_only = true,
456 .ret_type = RET_INTEGER,
457 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
458 .arg2_type = ARG_CONST_SIZE,
459 .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
460 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
461};
462
463const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
464{
465 __set_printk_clr_event();
466 return &bpf_trace_vprintk_proto;
467}
468
469BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
470 const void *, args, u32, data_len)
471{
472 struct bpf_bprintf_data data = {
473 .get_bin_args = true,
474 };
475 int err, num_args;
476
477 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
478 (data_len && !args))
479 return -EINVAL;
480 num_args = data_len / 8;
481
482 err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
483 if (err < 0)
484 return err;
485
486 seq_bprintf(m, fmt, data.bin_args);
487
488 bpf_bprintf_cleanup(&data);
489
490 return seq_has_overflowed(m) ? -EOVERFLOW : 0;
491}
492
493BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
494
495static const struct bpf_func_proto bpf_seq_printf_proto = {
496 .func = bpf_seq_printf,
497 .gpl_only = true,
498 .ret_type = RET_INTEGER,
499 .arg1_type = ARG_PTR_TO_BTF_ID,
500 .arg1_btf_id = &btf_seq_file_ids[0],
501 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
502 .arg3_type = ARG_CONST_SIZE,
503 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
504 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
505};
506
507BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
508{
509 return seq_write(m, data, len) ? -EOVERFLOW : 0;
510}
511
512static const struct bpf_func_proto bpf_seq_write_proto = {
513 .func = bpf_seq_write,
514 .gpl_only = true,
515 .ret_type = RET_INTEGER,
516 .arg1_type = ARG_PTR_TO_BTF_ID,
517 .arg1_btf_id = &btf_seq_file_ids[0],
518 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
519 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
520};
521
522BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
523 u32, btf_ptr_size, u64, flags)
524{
525 const struct btf *btf;
526 s32 btf_id;
527 int ret;
528
529 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
530 if (ret)
531 return ret;
532
533 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
534}
535
536static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
537 .func = bpf_seq_printf_btf,
538 .gpl_only = true,
539 .ret_type = RET_INTEGER,
540 .arg1_type = ARG_PTR_TO_BTF_ID,
541 .arg1_btf_id = &btf_seq_file_ids[0],
542 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
543 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
544 .arg4_type = ARG_ANYTHING,
545};
546
547static __always_inline int
548get_map_perf_counter(struct bpf_map *map, u64 flags,
549 u64 *value, u64 *enabled, u64 *running)
550{
551 struct bpf_array *array = container_of(map, struct bpf_array, map);
552 unsigned int cpu = smp_processor_id();
553 u64 index = flags & BPF_F_INDEX_MASK;
554 struct bpf_event_entry *ee;
555
556 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
557 return -EINVAL;
558 if (index == BPF_F_CURRENT_CPU)
559 index = cpu;
560 if (unlikely(index >= array->map.max_entries))
561 return -E2BIG;
562
563 ee = READ_ONCE(array->ptrs[index]);
564 if (!ee)
565 return -ENOENT;
566
567 return perf_event_read_local(ee->event, value, enabled, running);
568}
569
570BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
571{
572 u64 value = 0;
573 int err;
574
575 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
576 /*
577 * this api is ugly since we miss [-22..-2] range of valid
578 * counter values, but that's uapi
579 */
580 if (err)
581 return err;
582 return value;
583}
584
585static const struct bpf_func_proto bpf_perf_event_read_proto = {
586 .func = bpf_perf_event_read,
587 .gpl_only = true,
588 .ret_type = RET_INTEGER,
589 .arg1_type = ARG_CONST_MAP_PTR,
590 .arg2_type = ARG_ANYTHING,
591};
592
593BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
594 struct bpf_perf_event_value *, buf, u32, size)
595{
596 int err = -EINVAL;
597
598 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
599 goto clear;
600 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
601 &buf->running);
602 if (unlikely(err))
603 goto clear;
604 return 0;
605clear:
606 memset(buf, 0, size);
607 return err;
608}
609
610static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
611 .func = bpf_perf_event_read_value,
612 .gpl_only = true,
613 .ret_type = RET_INTEGER,
614 .arg1_type = ARG_CONST_MAP_PTR,
615 .arg2_type = ARG_ANYTHING,
616 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
617 .arg4_type = ARG_CONST_SIZE,
618};
619
620static __always_inline u64
621__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
622 u64 flags, struct perf_raw_record *raw,
623 struct perf_sample_data *sd)
624{
625 struct bpf_array *array = container_of(map, struct bpf_array, map);
626 unsigned int cpu = smp_processor_id();
627 u64 index = flags & BPF_F_INDEX_MASK;
628 struct bpf_event_entry *ee;
629 struct perf_event *event;
630
631 if (index == BPF_F_CURRENT_CPU)
632 index = cpu;
633 if (unlikely(index >= array->map.max_entries))
634 return -E2BIG;
635
636 ee = READ_ONCE(array->ptrs[index]);
637 if (!ee)
638 return -ENOENT;
639
640 event = ee->event;
641 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
642 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
643 return -EINVAL;
644
645 if (unlikely(event->oncpu != cpu))
646 return -EOPNOTSUPP;
647
648 perf_sample_save_raw_data(sd, event, raw);
649
650 return perf_event_output(event, sd, regs);
651}
652
653/*
654 * Support executing tracepoints in normal, irq, and nmi context that each call
655 * bpf_perf_event_output
656 */
657struct bpf_trace_sample_data {
658 struct perf_sample_data sds[3];
659};
660
661static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
662static DEFINE_PER_CPU(int, bpf_trace_nest_level);
663BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
664 u64, flags, void *, data, u64, size)
665{
666 struct bpf_trace_sample_data *sds;
667 struct perf_raw_record raw = {
668 .frag = {
669 .size = size,
670 .data = data,
671 },
672 };
673 struct perf_sample_data *sd;
674 int nest_level, err;
675
676 preempt_disable();
677 sds = this_cpu_ptr(&bpf_trace_sds);
678 nest_level = this_cpu_inc_return(bpf_trace_nest_level);
679
680 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
681 err = -EBUSY;
682 goto out;
683 }
684
685 sd = &sds->sds[nest_level - 1];
686
687 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
688 err = -EINVAL;
689 goto out;
690 }
691
692 perf_sample_data_init(sd, 0, 0);
693
694 err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
695out:
696 this_cpu_dec(bpf_trace_nest_level);
697 preempt_enable();
698 return err;
699}
700
701static const struct bpf_func_proto bpf_perf_event_output_proto = {
702 .func = bpf_perf_event_output,
703 .gpl_only = true,
704 .ret_type = RET_INTEGER,
705 .arg1_type = ARG_PTR_TO_CTX,
706 .arg2_type = ARG_CONST_MAP_PTR,
707 .arg3_type = ARG_ANYTHING,
708 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
709 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
710};
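/*
 * Illustrative sketch (libbpf conventions; the event struct, map name and
 * attach point are hypothetical): emitting a sample through a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY with BPF_F_CURRENT_CPU, so that the index
 * resolves to the current CPU as handled in __bpf_perf_event_output() above.
 *
 *	struct event { __u32 pid; __u64 ts; };
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int emit_event(struct pt_regs *ctx)
 *	{
 *		struct event e = {
 *			.pid = bpf_get_current_pid_tgid() >> 32,
 *			.ts  = bpf_ktime_get_ns(),
 *		};
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */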
711
712static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
713struct bpf_nested_pt_regs {
714 struct pt_regs regs[3];
715};
716static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
717static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
718
719u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
720 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
721{
722 struct perf_raw_frag frag = {
723 .copy = ctx_copy,
724 .size = ctx_size,
725 .data = ctx,
726 };
727 struct perf_raw_record raw = {
728 .frag = {
729 {
730 .next = ctx_size ? &frag : NULL,
731 },
732 .size = meta_size,
733 .data = meta,
734 },
735 };
736 struct perf_sample_data *sd;
737 struct pt_regs *regs;
738 int nest_level;
739 u64 ret;
740
741 preempt_disable();
742 nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
743
744 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
745 ret = -EBUSY;
746 goto out;
747 }
748 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
749 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
750
751 perf_fetch_caller_regs(regs);
752 perf_sample_data_init(sd, 0, 0);
753
754 ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
755out:
756 this_cpu_dec(bpf_event_output_nest_level);
757 preempt_enable();
758 return ret;
759}
760
761BPF_CALL_0(bpf_get_current_task)
762{
763 return (long) current;
764}
765
766const struct bpf_func_proto bpf_get_current_task_proto = {
767 .func = bpf_get_current_task,
768 .gpl_only = true,
769 .ret_type = RET_INTEGER,
770};
771
772BPF_CALL_0(bpf_get_current_task_btf)
773{
774 return (unsigned long) current;
775}
776
777const struct bpf_func_proto bpf_get_current_task_btf_proto = {
778 .func = bpf_get_current_task_btf,
779 .gpl_only = true,
780 .ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
781 .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
782};
783
784BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
785{
786 return (unsigned long) task_pt_regs(task);
787}
788
789BTF_ID_LIST(bpf_task_pt_regs_ids)
790BTF_ID(struct, pt_regs)
791
792const struct bpf_func_proto bpf_task_pt_regs_proto = {
793 .func = bpf_task_pt_regs,
794 .gpl_only = true,
795 .arg1_type = ARG_PTR_TO_BTF_ID,
796 .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
797 .ret_type = RET_PTR_TO_BTF_ID,
798 .ret_btf_id = &bpf_task_pt_regs_ids[0],
799};
800
801struct send_signal_irq_work {
802 struct irq_work irq_work;
803 struct task_struct *task;
804 u32 sig;
805 enum pid_type type;
806 bool has_siginfo;
807 struct kernel_siginfo info;
808};
809
810static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
811
812static void do_bpf_send_signal(struct irq_work *entry)
813{
814 struct send_signal_irq_work *work;
815 struct kernel_siginfo *siginfo;
816
817 work = container_of(entry, struct send_signal_irq_work, irq_work);
818 siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;
819
820 group_send_sig_info(work->sig, siginfo, work->task, work->type);
821 put_task_struct(work->task);
822}
823
824static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
825{
826 struct send_signal_irq_work *work = NULL;
827 struct kernel_siginfo info;
828 struct kernel_siginfo *siginfo;
829
830 if (!task) {
831 task = current;
832 siginfo = SEND_SIG_PRIV;
833 } else {
834 clear_siginfo(&info);
835 info.si_signo = sig;
836 info.si_errno = 0;
837 info.si_code = SI_KERNEL;
838 info.si_pid = 0;
839 info.si_uid = 0;
840 info.si_value.sival_ptr = (void *)(unsigned long)value;
841 siginfo = &info;
842 }
843
844	/* Similar to bpf_probe_write_user, the task needs to be
845	 * in a sound condition and kernel memory access must be
846	 * permitted in order to send a signal to the target
847	 * task.
848	 */
849 if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
850 return -EPERM;
851 if (unlikely(!nmi_uaccess_okay()))
852 return -EPERM;
853 /* Task should not be pid=1 to avoid kernel panic. */
854 if (unlikely(is_global_init(task)))
855 return -EPERM;
856
857 if (!preemptible()) {
858 /* Do an early check on signal validity. Otherwise,
859 * the error is lost in deferred irq_work.
860 */
861 if (unlikely(!valid_signal(sig)))
862 return -EINVAL;
863
864 work = this_cpu_ptr(&send_signal_work);
865 if (irq_work_is_busy(&work->irq_work))
866 return -EBUSY;
867
868		/* Add the current task, which is the target of the signal,
869		 * to the irq_work. The current task may change before the
870		 * queued irq_work is executed.
871		 */
872 work->task = get_task_struct(task);
873 work->has_siginfo = siginfo == &info;
874 if (work->has_siginfo)
875 copy_siginfo(&work->info, &info);
876 work->sig = sig;
877 work->type = type;
878 irq_work_queue(&work->irq_work);
879 return 0;
880 }
881
882 return group_send_sig_info(sig, siginfo, task, type);
883}
884
885BPF_CALL_1(bpf_send_signal, u32, sig)
886{
887 return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
888}
889
890static const struct bpf_func_proto bpf_send_signal_proto = {
891 .func = bpf_send_signal,
892 .gpl_only = false,
893 .ret_type = RET_INTEGER,
894 .arg1_type = ARG_ANYTHING,
895};
896
897BPF_CALL_1(bpf_send_signal_thread, u32, sig)
898{
899 return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
900}
901
902static const struct bpf_func_proto bpf_send_signal_thread_proto = {
903 .func = bpf_send_signal_thread,
904 .gpl_only = false,
905 .ret_type = RET_INTEGER,
906 .arg1_type = ARG_ANYTHING,
907};
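/*
 * Usage sketch (libbpf conventions; attach point, PID and signal number are
 * hypothetical): bpf_send_signal() targets the current process (PIDTYPE_TGID)
 * while bpf_send_signal_thread() targets only the current thread
 * (PIDTYPE_PID); both funnel into bpf_send_signal_common() above.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int signal_on_open(struct pt_regs *ctx)
 *	{
 *		if ((bpf_get_current_pid_tgid() >> 32) == 4242)
 *			bpf_send_signal(10); // SIGUSR1 on common architectures
 *		return 0;
 *	}
 */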
908
909BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
910{
911 struct path copy;
912 long len;
913 char *p;
914
915 if (!sz)
916 return 0;
917
918 /*
919 * The path pointer is verified as trusted and safe to use,
920	 * but let's double-check that it's valid anyway to work around
921	 * a potentially broken verifier.
922 */
923	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
924 if (len < 0)
925 return len;
926
927	p = d_path(&copy, buf, sz);
928 if (IS_ERR(p)) {
929 len = PTR_ERR(p);
930 } else {
931 len = buf + sz - p;
932 memmove(buf, p, len);
933 }
934
935 return len;
936}
937
938BTF_SET_START(btf_allowlist_d_path)
939#ifdef CONFIG_SECURITY
940BTF_ID(func, security_file_permission)
941BTF_ID(func, security_inode_getattr)
942BTF_ID(func, security_file_open)
943#endif
944#ifdef CONFIG_SECURITY_PATH
945BTF_ID(func, security_path_truncate)
946#endif
947BTF_ID(func, vfs_truncate)
948BTF_ID(func, vfs_fallocate)
949BTF_ID(func, dentry_open)
950BTF_ID(func, vfs_getattr)
951BTF_ID(func, filp_close)
952BTF_SET_END(btf_allowlist_d_path)
953
954static bool bpf_d_path_allowed(const struct bpf_prog *prog)
955{
956 if (prog->type == BPF_PROG_TYPE_TRACING &&
957 prog->expected_attach_type == BPF_TRACE_ITER)
958 return true;
959
960 if (prog->type == BPF_PROG_TYPE_LSM)
961 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
962
963 return btf_id_set_contains(&btf_allowlist_d_path,
964 prog->aux->attach_btf_id);
965}
966
967BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
968
969static const struct bpf_func_proto bpf_d_path_proto = {
970 .func = bpf_d_path,
971 .gpl_only = false,
972 .ret_type = RET_INTEGER,
973 .arg1_type = ARG_PTR_TO_BTF_ID,
974 .arg1_btf_id = &bpf_d_path_btf_ids[0],
975 .arg2_type = ARG_PTR_TO_MEM,
976 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
977 .allowed = bpf_d_path_allowed,
978};
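/*
 * Illustrative sketch (libbpf conventions; BPF_PROG() comes from
 * <bpf/bpf_tracing.h> and the hook is only an example): bpf_d_path() is
 * limited to the contexts accepted by bpf_d_path_allowed() above, such as
 * BPF_TRACE_ITER programs, sleepable LSM hooks, or the allowlisted functions
 * like security_file_open().
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(log_open, struct file *file)
 *	{
 *		char buf[256];
 *
 *		if (bpf_d_path(&file->f_path, buf, sizeof(buf)) > 0)
 *			bpf_printk("open: %s", buf);
 *		return 0;
 *	}
 */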
979
980#define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
981 BTF_F_PTR_RAW | BTF_F_ZERO)
982
983static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
984 u64 flags, const struct btf **btf,
985 s32 *btf_id)
986{
987 const struct btf_type *t;
988
989 if (unlikely(flags & ~(BTF_F_ALL)))
990 return -EINVAL;
991
992 if (btf_ptr_size != sizeof(struct btf_ptr))
993 return -EINVAL;
994
995 *btf = bpf_get_btf_vmlinux();
996
997 if (IS_ERR_OR_NULL(*btf))
998 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
999
1000 if (ptr->type_id > 0)
1001 *btf_id = ptr->type_id;
1002 else
1003 return -EINVAL;
1004
1005 if (*btf_id > 0)
1006 t = btf_type_by_id(*btf, *btf_id);
1007 if (*btf_id <= 0 || !t)
1008 return -ENOENT;
1009
1010 return 0;
1011}
1012
1013BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1014 u32, btf_ptr_size, u64, flags)
1015{
1016 const struct btf *btf;
1017 s32 btf_id;
1018 int ret;
1019
1020 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1021 if (ret)
1022 return ret;
1023
1024 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1025 flags);
1026}
1027
1028const struct bpf_func_proto bpf_snprintf_btf_proto = {
1029 .func = bpf_snprintf_btf,
1030 .gpl_only = false,
1031 .ret_type = RET_INTEGER,
1032 .arg1_type = ARG_PTR_TO_MEM,
1033 .arg2_type = ARG_CONST_SIZE,
1034 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1035 .arg4_type = ARG_CONST_SIZE,
1036 .arg5_type = ARG_ANYTHING,
1037};
1038
1039BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1040{
1041 /* This helper call is inlined by verifier. */
1042 return ((u64 *)ctx)[-2];
1043}
1044
1045static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1046 .func = bpf_get_func_ip_tracing,
1047 .gpl_only = true,
1048 .ret_type = RET_INTEGER,
1049 .arg1_type = ARG_PTR_TO_CTX,
1050};
1051
1052#ifdef CONFIG_X86_KERNEL_IBT
1053static unsigned long get_entry_ip(unsigned long fentry_ip)
1054{
1055 u32 instr;
1056
1057 /* We want to be extra safe in case entry ip is on the page edge,
1058 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
1059 */
1060 if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
1061 if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
1062 return fentry_ip;
1063 } else {
1064 instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
1065 }
1066 if (is_endbr(instr))
1067 fentry_ip -= ENDBR_INSN_SIZE;
1068 return fentry_ip;
1069}
1070#else
1071#define get_entry_ip(fentry_ip) fentry_ip
1072#endif
1073
1074BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1075{
1076 struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1077 struct kprobe *kp;
1078
1079#ifdef CONFIG_UPROBES
1080 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1081 if (run_ctx->is_uprobe)
1082 return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1083#endif
1084
1085 kp = kprobe_running();
1086
1087 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1088 return 0;
1089
1090 return get_entry_ip((uintptr_t)kp->addr);
1091}
1092
1093static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1094 .func = bpf_get_func_ip_kprobe,
1095 .gpl_only = true,
1096 .ret_type = RET_INTEGER,
1097 .arg1_type = ARG_PTR_TO_CTX,
1098};
1099
1100BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1101{
1102 return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1103}
1104
1105static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1106 .func = bpf_get_func_ip_kprobe_multi,
1107 .gpl_only = false,
1108 .ret_type = RET_INTEGER,
1109 .arg1_type = ARG_PTR_TO_CTX,
1110};
1111
1112BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1113{
1114 return bpf_kprobe_multi_cookie(current->bpf_ctx);
1115}
1116
1117static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1118 .func = bpf_get_attach_cookie_kprobe_multi,
1119 .gpl_only = false,
1120 .ret_type = RET_INTEGER,
1121 .arg1_type = ARG_PTR_TO_CTX,
1122};
1123
1124BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1125{
1126 return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1127}
1128
1129static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1130 .func = bpf_get_func_ip_uprobe_multi,
1131 .gpl_only = false,
1132 .ret_type = RET_INTEGER,
1133 .arg1_type = ARG_PTR_TO_CTX,
1134};
1135
1136BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1137{
1138 return bpf_uprobe_multi_cookie(current->bpf_ctx);
1139}
1140
1141static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1142 .func = bpf_get_attach_cookie_uprobe_multi,
1143 .gpl_only = false,
1144 .ret_type = RET_INTEGER,
1145 .arg1_type = ARG_PTR_TO_CTX,
1146};
1147
1148BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1149{
1150 struct bpf_trace_run_ctx *run_ctx;
1151
1152 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1153 return run_ctx->bpf_cookie;
1154}
1155
1156static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1157 .func = bpf_get_attach_cookie_trace,
1158 .gpl_only = false,
1159 .ret_type = RET_INTEGER,
1160 .arg1_type = ARG_PTR_TO_CTX,
1161};
1162
1163BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1164{
1165 return ctx->event->bpf_cookie;
1166}
1167
1168static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1169 .func = bpf_get_attach_cookie_pe,
1170 .gpl_only = false,
1171 .ret_type = RET_INTEGER,
1172 .arg1_type = ARG_PTR_TO_CTX,
1173};
1174
1175BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1176{
1177 struct bpf_trace_run_ctx *run_ctx;
1178
1179 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1180 return run_ctx->bpf_cookie;
1181}
1182
1183static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1184 .func = bpf_get_attach_cookie_tracing,
1185 .gpl_only = false,
1186 .ret_type = RET_INTEGER,
1187 .arg1_type = ARG_PTR_TO_CTX,
1188};
1189
1190BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1191{
1192 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1193 u32 entry_cnt = size / br_entry_size;
1194
1195 entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1196
1197 if (unlikely(flags))
1198 return -EINVAL;
1199
1200 if (!entry_cnt)
1201 return -ENOENT;
1202
1203 return entry_cnt * br_entry_size;
1204}
1205
1206static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1207 .func = bpf_get_branch_snapshot,
1208 .gpl_only = true,
1209 .ret_type = RET_INTEGER,
1210 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1211 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1212};
1213
1214BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1215{
1216 /* This helper call is inlined by verifier. */
1217 u64 nr_args = ((u64 *)ctx)[-1];
1218
1219 if ((u64) n >= nr_args)
1220 return -EINVAL;
1221 *value = ((u64 *)ctx)[n];
1222 return 0;
1223}
1224
1225static const struct bpf_func_proto bpf_get_func_arg_proto = {
1226 .func = get_func_arg,
1227 .ret_type = RET_INTEGER,
1228 .arg1_type = ARG_PTR_TO_CTX,
1229 .arg2_type = ARG_ANYTHING,
1230 .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1231 .arg3_size = sizeof(u64),
1232};
1233
1234BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1235{
1236 /* This helper call is inlined by verifier. */
1237 u64 nr_args = ((u64 *)ctx)[-1];
1238
1239 *value = ((u64 *)ctx)[nr_args];
1240 return 0;
1241}
1242
1243static const struct bpf_func_proto bpf_get_func_ret_proto = {
1244 .func = get_func_ret,
1245 .ret_type = RET_INTEGER,
1246 .arg1_type = ARG_PTR_TO_CTX,
1247 .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1248 .arg2_size = sizeof(u64),
1249};
1250
1251BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1252{
1253 /* This helper call is inlined by verifier. */
1254 return ((u64 *)ctx)[-1];
1255}
1256
1257static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1258 .func = get_func_arg_cnt,
1259 .ret_type = RET_INTEGER,
1260 .arg1_type = ARG_PTR_TO_CTX,
1261};
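/*
 * Usage sketch (libbpf conventions; the attach point is only an example): in
 * fentry/fexit programs ctx is the array of u64 arguments these helpers index
 * into, with the argument count at ctx[-1] and, for fexit, the return value
 * at ctx[nr_args].
 *
 *	SEC("fexit/do_sys_openat2")
 *	int BPF_PROG(openat_exit)
 *	{
 *		__u64 dfd = 0, ret = 0;
 *
 *		if (bpf_get_func_arg_cnt(ctx) < 1)
 *			return 0;
 *		bpf_get_func_arg(ctx, 0, &dfd);
 *		bpf_get_func_ret(ctx, &ret);
 *		bpf_printk("openat2(dfd=%lld) = %lld", dfd, ret);
 *		return 0;
 *	}
 */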
1262
1263#ifdef CONFIG_KEYS
1264__bpf_kfunc_start_defs();
1265
1266/**
1267 * bpf_lookup_user_key - lookup a key by its serial
1268 * @serial: key handle serial number
1269 * @flags: lookup-specific flags
1270 *
1271 * Search a key with a given *serial* and the provided *flags*.
1272 * If found, increment the reference count of the key by one, and
1273 * return it in the bpf_key structure.
1274 *
1275 * The bpf_key structure must be passed to bpf_key_put() when done
1276 * with it, so that the key reference count is decremented and the
1277 * bpf_key structure is freed.
1278 *
1279 * Permission checks are deferred to the time the key is used by
1280 * one of the available key-specific kfuncs.
1281 *
1282 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1283 * special keyring (e.g. session keyring), if it doesn't yet exist.
1284 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1285 * for the key construction, and to retrieve uninstantiated keys (keys
1286 * without data attached to them).
1287 *
1288 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1289 * NULL pointer otherwise.
1290 */
1291__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1292{
1293 key_ref_t key_ref;
1294 struct bpf_key *bkey;
1295
1296 if (flags & ~KEY_LOOKUP_ALL)
1297 return NULL;
1298
1299 /*
1300 * Permission check is deferred until the key is used, as the
1301 * intent of the caller is unknown here.
1302 */
1303 key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1304 if (IS_ERR(key_ref))
1305 return NULL;
1306
1307 bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1308 if (!bkey) {
1309 key_put(key_ref_to_ptr(key_ref));
1310 return NULL;
1311 }
1312
1313 bkey->key = key_ref_to_ptr(key_ref);
1314 bkey->has_ref = true;
1315
1316 return bkey;
1317}
1318
1319/**
1320 * bpf_lookup_system_key - lookup a key by a system-defined ID
1321 * @id: key ID
1322 *
1323 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1324 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1325 * attempting to decrement the key reference count on that pointer. The key
1326 * pointer set in such way is currently understood only by
1327 * verify_pkcs7_signature().
1328 *
1329 * Set *id* to one of the values defined in include/linux/verification.h:
1330 * 0 for the primary keyring (immutable keyring of system keys);
1331 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1332 * (where keys can be added only if they are vouched for by existing keys
1333 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1334 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1335 * kernel image and, possibly, the initramfs signature).
1336 *
1337 * Return: a bpf_key pointer with an invalid key pointer set from the
1338 * pre-determined ID on success, a NULL pointer otherwise
1339 */
1340__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1341{
1342 struct bpf_key *bkey;
1343
1344 if (system_keyring_id_check(id) < 0)
1345 return NULL;
1346
1347 bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1348 if (!bkey)
1349 return NULL;
1350
1351 bkey->key = (struct key *)(unsigned long)id;
1352 bkey->has_ref = false;
1353
1354 return bkey;
1355}
1356
1357/**
1358 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1359 * @bkey: bpf_key structure
1360 *
1361 * Decrement the reference count of the key inside *bkey*, if the pointer
1362 * is valid, and free *bkey*.
1363 */
1364__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1365{
1366 if (bkey->has_ref)
1367 key_put(bkey->key);
1368
1369 kfree(bkey);
1370}
1371
1372#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1373/**
1374 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1375 * @data_p: data to verify
1376 * @sig_p: signature of the data
1377 * @trusted_keyring: keyring with keys trusted for signature verification
1378 *
1379 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
1380 * with keys in a keyring referenced by *trusted_keyring*.
1381 *
1382 * Return: 0 on success, a negative value on error.
1383 */
1384__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
1385 struct bpf_dynptr *sig_p,
1386 struct bpf_key *trusted_keyring)
1387{
1388 struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
1389 struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
1390 const void *data, *sig;
1391 u32 data_len, sig_len;
1392 int ret;
1393
1394 if (trusted_keyring->has_ref) {
1395 /*
1396 * Do the permission check deferred in bpf_lookup_user_key().
1397 * See bpf_lookup_user_key() for more details.
1398 *
1399 * A call to key_task_permission() here would be redundant, as
1400 * it is already done by keyring_search() called by
1401 * find_asymmetric_key().
1402 */
1403 ret = key_validate(trusted_keyring->key);
1404 if (ret < 0)
1405 return ret;
1406 }
1407
1408 data_len = __bpf_dynptr_size(data_ptr);
1409 data = __bpf_dynptr_data(data_ptr, data_len);
1410 sig_len = __bpf_dynptr_size(sig_ptr);
1411 sig = __bpf_dynptr_data(sig_ptr, sig_len);
1412
1413 return verify_pkcs7_signature(data, data_len, sig, sig_len,
1414 trusted_keyring->key,
1415 VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1416 NULL);
1417}
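/*
 * Illustrative sketch (BPF-side view; the __ksym declarations follow libbpf
 * conventions and the dynptrs are assumed to be initialized elsewhere): the
 * expected calling pattern from a sleepable program is lookup, verify, then
 * put.
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *	extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
 *					      struct bpf_dynptr *sig_p,
 *					      struct bpf_key *trusted_keyring) __ksym;
 *
 *	static int check_sig(struct bpf_dynptr *data, struct bpf_dynptr *sig,
 *			     __u32 keyring_serial)
 *	{
 *		struct bpf_key *key;
 *		int ret;
 *
 *		key = bpf_lookup_user_key(keyring_serial, 0);
 *		if (!key)
 *			return -1;
 *		ret = bpf_verify_pkcs7_signature(data, sig, key);
 *		bpf_key_put(key);
 *		return ret;
 *	}
 */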
1418#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1419
1420__bpf_kfunc_end_defs();
1421
1422BTF_KFUNCS_START(key_sig_kfunc_set)
1423BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1424BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1425BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1426#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1427BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1428#endif
1429BTF_KFUNCS_END(key_sig_kfunc_set)
1430
1431static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1432 .owner = THIS_MODULE,
1433 .set = &key_sig_kfunc_set,
1434};
1435
1436static int __init bpf_key_sig_kfuncs_init(void)
1437{
1438 return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1439 &bpf_key_sig_kfunc_set);
1440}
1441
1442late_initcall(bpf_key_sig_kfuncs_init);
1443#endif /* CONFIG_KEYS */
1444
1445static const struct bpf_func_proto *
1446bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1447{
1448 switch (func_id) {
1449 case BPF_FUNC_map_lookup_elem:
1450 return &bpf_map_lookup_elem_proto;
1451 case BPF_FUNC_map_update_elem:
1452 return &bpf_map_update_elem_proto;
1453 case BPF_FUNC_map_delete_elem:
1454 return &bpf_map_delete_elem_proto;
1455 case BPF_FUNC_map_push_elem:
1456 return &bpf_map_push_elem_proto;
1457 case BPF_FUNC_map_pop_elem:
1458 return &bpf_map_pop_elem_proto;
1459 case BPF_FUNC_map_peek_elem:
1460 return &bpf_map_peek_elem_proto;
1461 case BPF_FUNC_map_lookup_percpu_elem:
1462 return &bpf_map_lookup_percpu_elem_proto;
1463 case BPF_FUNC_ktime_get_ns:
1464 return &bpf_ktime_get_ns_proto;
1465 case BPF_FUNC_ktime_get_boot_ns:
1466 return &bpf_ktime_get_boot_ns_proto;
1467 case BPF_FUNC_tail_call:
1468 return &bpf_tail_call_proto;
1469 case BPF_FUNC_get_current_task:
1470 return &bpf_get_current_task_proto;
1471 case BPF_FUNC_get_current_task_btf:
1472 return &bpf_get_current_task_btf_proto;
1473 case BPF_FUNC_task_pt_regs:
1474 return &bpf_task_pt_regs_proto;
1475 case BPF_FUNC_get_current_uid_gid:
1476 return &bpf_get_current_uid_gid_proto;
1477 case BPF_FUNC_get_current_comm:
1478 return &bpf_get_current_comm_proto;
1479 case BPF_FUNC_trace_printk:
1480 return bpf_get_trace_printk_proto();
1481 case BPF_FUNC_get_smp_processor_id:
1482 return &bpf_get_smp_processor_id_proto;
1483 case BPF_FUNC_get_numa_node_id:
1484 return &bpf_get_numa_node_id_proto;
1485 case BPF_FUNC_perf_event_read:
1486 return &bpf_perf_event_read_proto;
1487 case BPF_FUNC_get_prandom_u32:
1488 return &bpf_get_prandom_u32_proto;
1489 case BPF_FUNC_probe_write_user:
1490 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1491 NULL : bpf_get_probe_write_proto();
1492 case BPF_FUNC_probe_read_user:
1493 return &bpf_probe_read_user_proto;
1494 case BPF_FUNC_probe_read_kernel:
1495 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1496 NULL : &bpf_probe_read_kernel_proto;
1497 case BPF_FUNC_probe_read_user_str:
1498 return &bpf_probe_read_user_str_proto;
1499 case BPF_FUNC_probe_read_kernel_str:
1500 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1501 NULL : &bpf_probe_read_kernel_str_proto;
1502#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1503 case BPF_FUNC_probe_read:
1504 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1505 NULL : &bpf_probe_read_compat_proto;
1506 case BPF_FUNC_probe_read_str:
1507 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1508 NULL : &bpf_probe_read_compat_str_proto;
1509#endif
1510#ifdef CONFIG_CGROUPS
1511 case BPF_FUNC_cgrp_storage_get:
1512 return &bpf_cgrp_storage_get_proto;
1513 case BPF_FUNC_cgrp_storage_delete:
1514 return &bpf_cgrp_storage_delete_proto;
1515 case BPF_FUNC_current_task_under_cgroup:
1516 return &bpf_current_task_under_cgroup_proto;
1517#endif
1518 case BPF_FUNC_send_signal:
1519 return &bpf_send_signal_proto;
1520 case BPF_FUNC_send_signal_thread:
1521 return &bpf_send_signal_thread_proto;
1522 case BPF_FUNC_perf_event_read_value:
1523 return &bpf_perf_event_read_value_proto;
1524 case BPF_FUNC_ringbuf_output:
1525 return &bpf_ringbuf_output_proto;
1526 case BPF_FUNC_ringbuf_reserve:
1527 return &bpf_ringbuf_reserve_proto;
1528 case BPF_FUNC_ringbuf_submit:
1529 return &bpf_ringbuf_submit_proto;
1530 case BPF_FUNC_ringbuf_discard:
1531 return &bpf_ringbuf_discard_proto;
1532 case BPF_FUNC_ringbuf_query:
1533 return &bpf_ringbuf_query_proto;
1534 case BPF_FUNC_jiffies64:
1535 return &bpf_jiffies64_proto;
1536 case BPF_FUNC_get_task_stack:
1537 return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
1538 : &bpf_get_task_stack_proto;
1539 case BPF_FUNC_copy_from_user:
1540 return &bpf_copy_from_user_proto;
1541 case BPF_FUNC_copy_from_user_task:
1542 return &bpf_copy_from_user_task_proto;
1543 case BPF_FUNC_snprintf_btf:
1544 return &bpf_snprintf_btf_proto;
1545 case BPF_FUNC_per_cpu_ptr:
1546 return &bpf_per_cpu_ptr_proto;
1547 case BPF_FUNC_this_cpu_ptr:
1548 return &bpf_this_cpu_ptr_proto;
1549 case BPF_FUNC_task_storage_get:
1550 if (bpf_prog_check_recur(prog))
1551 return &bpf_task_storage_get_recur_proto;
1552 return &bpf_task_storage_get_proto;
1553 case BPF_FUNC_task_storage_delete:
1554 if (bpf_prog_check_recur(prog))
1555 return &bpf_task_storage_delete_recur_proto;
1556 return &bpf_task_storage_delete_proto;
1557 case BPF_FUNC_for_each_map_elem:
1558 return &bpf_for_each_map_elem_proto;
1559 case BPF_FUNC_snprintf:
1560 return &bpf_snprintf_proto;
1561 case BPF_FUNC_get_func_ip:
1562 return &bpf_get_func_ip_proto_tracing;
1563 case BPF_FUNC_get_branch_snapshot:
1564 return &bpf_get_branch_snapshot_proto;
1565 case BPF_FUNC_find_vma:
1566 return &bpf_find_vma_proto;
1567 case BPF_FUNC_trace_vprintk:
1568 return bpf_get_trace_vprintk_proto();
1569 default:
1570 return bpf_base_func_proto(func_id, prog);
1571 }
1572}
1573
1574static bool is_kprobe_multi(const struct bpf_prog *prog)
1575{
1576 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
1577 prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1578}
1579
1580static inline bool is_kprobe_session(const struct bpf_prog *prog)
1581{
1582 return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1583}
1584
1585static inline bool is_uprobe_multi(const struct bpf_prog *prog)
1586{
1587 return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
1588 prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1589}
1590
1591static inline bool is_uprobe_session(const struct bpf_prog *prog)
1592{
1593 return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1594}
1595
1596static const struct bpf_func_proto *
1597kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1598{
1599 switch (func_id) {
1600 case BPF_FUNC_perf_event_output:
1601 return &bpf_perf_event_output_proto;
1602 case BPF_FUNC_get_stackid:
1603 return &bpf_get_stackid_proto;
1604 case BPF_FUNC_get_stack:
1605 return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
1606#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1607 case BPF_FUNC_override_return:
1608 return &bpf_override_return_proto;
1609#endif
1610 case BPF_FUNC_get_func_ip:
1611 if (is_kprobe_multi(prog))
1612 return &bpf_get_func_ip_proto_kprobe_multi;
1613 if (is_uprobe_multi(prog))
1614 return &bpf_get_func_ip_proto_uprobe_multi;
1615 return &bpf_get_func_ip_proto_kprobe;
1616 case BPF_FUNC_get_attach_cookie:
1617 if (is_kprobe_multi(prog))
1618 return &bpf_get_attach_cookie_proto_kmulti;
1619 if (is_uprobe_multi(prog))
1620 return &bpf_get_attach_cookie_proto_umulti;
1621 return &bpf_get_attach_cookie_proto_trace;
1622 default:
1623 return bpf_tracing_func_proto(func_id, prog);
1624 }
1625}
1626
1627/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1628static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1629 const struct bpf_prog *prog,
1630 struct bpf_insn_access_aux *info)
1631{
1632 if (off < 0 || off >= sizeof(struct pt_regs))
1633 return false;
1634 if (type != BPF_READ)
1635 return false;
1636 if (off % size != 0)
1637 return false;
1638 /*
1639 * Assertion for 32 bit to make sure last 8 byte access
1640 * (BPF_DW) to the last 4 byte member is disallowed.
1641 */
1642 if (off + size > sizeof(struct pt_regs))
1643 return false;
1644
1645 return true;
1646}
1647
1648const struct bpf_verifier_ops kprobe_verifier_ops = {
1649 .get_func_proto = kprobe_prog_func_proto,
1650 .is_valid_access = kprobe_prog_is_valid_access,
1651};
1652
1653const struct bpf_prog_ops kprobe_prog_ops = {
1654};
1655
1656BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1657 u64, flags, void *, data, u64, size)
1658{
1659 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1660
1661 /*
1662 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1663 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1664 * from there and call the same bpf_perf_event_output() helper inline.
1665 */
1666 return ____bpf_perf_event_output(regs, map, flags, data, size);
1667}
1668
1669static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1670 .func = bpf_perf_event_output_tp,
1671 .gpl_only = true,
1672 .ret_type = RET_INTEGER,
1673 .arg1_type = ARG_PTR_TO_CTX,
1674 .arg2_type = ARG_CONST_MAP_PTR,
1675 .arg3_type = ARG_ANYTHING,
1676 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1677 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1678};
1679
1680BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1681 u64, flags)
1682{
1683 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1684
1685 /*
1686 * Same comment as in bpf_perf_event_output_tp(), only that this time
1687 * the other helper's function body cannot be inlined due to being
1688 * external, thus we need to call raw helper function.
1689 */
1690 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1691 flags, 0, 0);
1692}
1693
1694static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1695 .func = bpf_get_stackid_tp,
1696 .gpl_only = true,
1697 .ret_type = RET_INTEGER,
1698 .arg1_type = ARG_PTR_TO_CTX,
1699 .arg2_type = ARG_CONST_MAP_PTR,
1700 .arg3_type = ARG_ANYTHING,
1701};
1702
1703BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1704 u64, flags)
1705{
1706 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1707
1708 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1709 (unsigned long) size, flags, 0);
1710}
1711
1712static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1713 .func = bpf_get_stack_tp,
1714 .gpl_only = true,
1715 .ret_type = RET_INTEGER,
1716 .arg1_type = ARG_PTR_TO_CTX,
1717 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1718 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1719 .arg4_type = ARG_ANYTHING,
1720};
1721
1722static const struct bpf_func_proto *
1723tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1724{
1725 switch (func_id) {
1726 case BPF_FUNC_perf_event_output:
1727 return &bpf_perf_event_output_proto_tp;
1728 case BPF_FUNC_get_stackid:
1729 return &bpf_get_stackid_proto_tp;
1730 case BPF_FUNC_get_stack:
1731 return &bpf_get_stack_proto_tp;
1732 case BPF_FUNC_get_attach_cookie:
1733 return &bpf_get_attach_cookie_proto_trace;
1734 default:
1735 return bpf_tracing_func_proto(func_id, prog);
1736 }
1737}
1738
1739static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1740 const struct bpf_prog *prog,
1741 struct bpf_insn_access_aux *info)
1742{
1743 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1744 return false;
1745 if (type != BPF_READ)
1746 return false;
1747 if (off % size != 0)
1748 return false;
1749
1750 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1751 return true;
1752}
1753
1754const struct bpf_verifier_ops tracepoint_verifier_ops = {
1755 .get_func_proto = tp_prog_func_proto,
1756 .is_valid_access = tp_prog_is_valid_access,
1757};
1758
1759const struct bpf_prog_ops tracepoint_prog_ops = {
1760};
1761
1762BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1763 struct bpf_perf_event_value *, buf, u32, size)
1764{
1765 int err = -EINVAL;
1766
1767 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1768 goto clear;
1769 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1770 &buf->running);
1771 if (unlikely(err))
1772 goto clear;
1773 return 0;
1774clear:
1775 memset(buf, 0, size);
1776 return err;
1777}
1778
1779static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1780 .func = bpf_perf_prog_read_value,
1781 .gpl_only = true,
1782 .ret_type = RET_INTEGER,
1783 .arg1_type = ARG_PTR_TO_CTX,
1784 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1785 .arg3_type = ARG_CONST_SIZE,
1786};
1787
1788BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1789 void *, buf, u32, size, u64, flags)
1790{
1791 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1792 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1793 u32 to_copy;
1794
1795 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1796 return -EINVAL;
1797
1798 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1799 return -ENOENT;
1800
1801 if (unlikely(!br_stack))
1802 return -ENOENT;
1803
1804 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1805 return br_stack->nr * br_entry_size;
1806
1807 if (!buf || (size % br_entry_size != 0))
1808 return -EINVAL;
1809
1810 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1811 memcpy(buf, br_stack->entries, to_copy);
1812
1813 return to_copy;
1814}
1815
1816static const struct bpf_func_proto bpf_read_branch_records_proto = {
1817 .func = bpf_read_branch_records,
1818 .gpl_only = true,
1819 .ret_type = RET_INTEGER,
1820 .arg1_type = ARG_PTR_TO_CTX,
1821 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1822 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1823 .arg4_type = ARG_ANYTHING,
1824};
1825
1826static const struct bpf_func_proto *
1827pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1828{
1829 switch (func_id) {
1830 case BPF_FUNC_perf_event_output:
1831 return &bpf_perf_event_output_proto_tp;
1832 case BPF_FUNC_get_stackid:
1833 return &bpf_get_stackid_proto_pe;
1834 case BPF_FUNC_get_stack:
1835 return &bpf_get_stack_proto_pe;
1836 case BPF_FUNC_perf_prog_read_value:
1837 return &bpf_perf_prog_read_value_proto;
1838 case BPF_FUNC_read_branch_records:
1839 return &bpf_read_branch_records_proto;
1840 case BPF_FUNC_get_attach_cookie:
1841 return &bpf_get_attach_cookie_proto_pe;
1842 default:
1843 return bpf_tracing_func_proto(func_id, prog);
1844 }
1845}
1846
1847/*
1848 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1849 * to avoid potential recursive reuse issue when/if tracepoints are added
1850 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1851 *
1852 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1853 * in normal, irq, and nmi context.
1854 */
1855struct bpf_raw_tp_regs {
1856 struct pt_regs regs[3];
1857};
1858static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1859static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1860static struct pt_regs *get_bpf_raw_tp_regs(void)
1861{
1862 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1863 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1864
1865 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1866 this_cpu_dec(bpf_raw_tp_nest_level);
1867 return ERR_PTR(-EBUSY);
1868 }
1869
1870 return &tp_regs->regs[nest_level - 1];
1871}
1872
1873static void put_bpf_raw_tp_regs(void)
1874{
1875 this_cpu_dec(bpf_raw_tp_nest_level);
1876}
1877
1878BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1879 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1880{
1881 struct pt_regs *regs = get_bpf_raw_tp_regs();
1882 int ret;
1883
1884 if (IS_ERR(regs))
1885 return PTR_ERR(regs);
1886
1887 perf_fetch_caller_regs(regs);
1888 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1889
1890 put_bpf_raw_tp_regs();
1891 return ret;
1892}
1893
1894static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1895 .func = bpf_perf_event_output_raw_tp,
1896 .gpl_only = true,
1897 .ret_type = RET_INTEGER,
1898 .arg1_type = ARG_PTR_TO_CTX,
1899 .arg2_type = ARG_CONST_MAP_PTR,
1900 .arg3_type = ARG_ANYTHING,
1901 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1902 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1903};
1904
1905extern const struct bpf_func_proto bpf_skb_output_proto;
1906extern const struct bpf_func_proto bpf_xdp_output_proto;
1907extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1908
1909BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1910 struct bpf_map *, map, u64, flags)
1911{
1912 struct pt_regs *regs = get_bpf_raw_tp_regs();
1913 int ret;
1914
1915 if (IS_ERR(regs))
1916 return PTR_ERR(regs);
1917
1918 perf_fetch_caller_regs(regs);
1919 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1920 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1921 flags, 0, 0);
1922 put_bpf_raw_tp_regs();
1923 return ret;
1924}
1925
1926static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1927 .func = bpf_get_stackid_raw_tp,
1928 .gpl_only = true,
1929 .ret_type = RET_INTEGER,
1930 .arg1_type = ARG_PTR_TO_CTX,
1931 .arg2_type = ARG_CONST_MAP_PTR,
1932 .arg3_type = ARG_ANYTHING,
1933};
1934
1935BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1936 void *, buf, u32, size, u64, flags)
1937{
1938 struct pt_regs *regs = get_bpf_raw_tp_regs();
1939 int ret;
1940
1941 if (IS_ERR(regs))
1942 return PTR_ERR(regs);
1943
1944 perf_fetch_caller_regs(regs);
1945 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1946 (unsigned long) size, flags, 0);
1947 put_bpf_raw_tp_regs();
1948 return ret;
1949}
1950
1951static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1952 .func = bpf_get_stack_raw_tp,
1953 .gpl_only = true,
1954 .ret_type = RET_INTEGER,
1955 .arg1_type = ARG_PTR_TO_CTX,
1956 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1957 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1958 .arg4_type = ARG_ANYTHING,
1959};
1960
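/*
 * Helpers available to raw tracepoint programs.  The perf_event_output,
 * get_stackid and get_stack variants above fetch pt_regs from the raw
 * tracepoint call site instead of taking them from the program context.
 */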
1961static const struct bpf_func_proto *
1962raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1963{
1964 switch (func_id) {
1965 case BPF_FUNC_perf_event_output:
1966 return &bpf_perf_event_output_proto_raw_tp;
1967 case BPF_FUNC_get_stackid:
1968 return &bpf_get_stackid_proto_raw_tp;
1969 case BPF_FUNC_get_stack:
1970 return &bpf_get_stack_proto_raw_tp;
1971 case BPF_FUNC_get_attach_cookie:
1972 return &bpf_get_attach_cookie_proto_tracing;
1973 default:
1974 return bpf_tracing_func_proto(func_id, prog);
1975 }
1976}
1977
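/*
 * Helpers for BPF_PROG_TYPE_TRACING programs (fentry/fexit/fmod_ret,
 * BPF_TRACE_RAW_TP and BPF_TRACE_ITER).  Socket helpers require
 * CONFIG_NET, the seq_* helpers are iterator-only, and the get_func_*
 * helpers are limited to programs attached via a trampoline.
 */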
1978const struct bpf_func_proto *
1979tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1980{
1981 const struct bpf_func_proto *fn;
1982
1983 switch (func_id) {
1984#ifdef CONFIG_NET
1985 case BPF_FUNC_skb_output:
1986 return &bpf_skb_output_proto;
1987 case BPF_FUNC_xdp_output:
1988 return &bpf_xdp_output_proto;
1989 case BPF_FUNC_skc_to_tcp6_sock:
1990 return &bpf_skc_to_tcp6_sock_proto;
1991 case BPF_FUNC_skc_to_tcp_sock:
1992 return &bpf_skc_to_tcp_sock_proto;
1993 case BPF_FUNC_skc_to_tcp_timewait_sock:
1994 return &bpf_skc_to_tcp_timewait_sock_proto;
1995 case BPF_FUNC_skc_to_tcp_request_sock:
1996 return &bpf_skc_to_tcp_request_sock_proto;
1997 case BPF_FUNC_skc_to_udp6_sock:
1998 return &bpf_skc_to_udp6_sock_proto;
1999 case BPF_FUNC_skc_to_unix_sock:
2000 return &bpf_skc_to_unix_sock_proto;
2001 case BPF_FUNC_skc_to_mptcp_sock:
2002 return &bpf_skc_to_mptcp_sock_proto;
2003 case BPF_FUNC_sk_storage_get:
2004 return &bpf_sk_storage_get_tracing_proto;
2005 case BPF_FUNC_sk_storage_delete:
2006 return &bpf_sk_storage_delete_tracing_proto;
2007 case BPF_FUNC_sock_from_file:
2008 return &bpf_sock_from_file_proto;
2009 case BPF_FUNC_get_socket_cookie:
2010 return &bpf_get_socket_ptr_cookie_proto;
2011 case BPF_FUNC_xdp_get_buff_len:
2012 return &bpf_xdp_get_buff_len_trace_proto;
2013#endif
2014 case BPF_FUNC_seq_printf:
2015 return prog->expected_attach_type == BPF_TRACE_ITER ?
2016 &bpf_seq_printf_proto :
2017 NULL;
2018 case BPF_FUNC_seq_write:
2019 return prog->expected_attach_type == BPF_TRACE_ITER ?
2020 &bpf_seq_write_proto :
2021 NULL;
2022 case BPF_FUNC_seq_printf_btf:
2023 return prog->expected_attach_type == BPF_TRACE_ITER ?
2024 &bpf_seq_printf_btf_proto :
2025 NULL;
2026 case BPF_FUNC_d_path:
2027 return &bpf_d_path_proto;
2028 case BPF_FUNC_get_func_arg:
2029 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
2030 case BPF_FUNC_get_func_ret:
2031 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2032 case BPF_FUNC_get_func_arg_cnt:
2033 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2034 case BPF_FUNC_get_attach_cookie:
2035 if (prog->type == BPF_PROG_TYPE_TRACING &&
2036 prog->expected_attach_type == BPF_TRACE_RAW_TP)
2037 return &bpf_get_attach_cookie_proto_tracing;
2038 return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2039 default:
2040 fn = raw_tp_prog_func_proto(func_id, prog);
2041 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2042 fn = bpf_iter_get_func_proto(func_id, prog);
2043 return fn;
2044 }
2045}
2046
2047static bool raw_tp_prog_is_valid_access(int off, int size,
2048 enum bpf_access_type type,
2049 const struct bpf_prog *prog,
2050 struct bpf_insn_access_aux *info)
2051{
2052 return bpf_tracing_ctx_access(off, size, type);
2053}
2054
2055static bool tracing_prog_is_valid_access(int off, int size,
2056 enum bpf_access_type type,
2057 const struct bpf_prog *prog,
2058 struct bpf_insn_access_aux *info)
2059{
2060 return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2061}
2062
2063int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2064 const union bpf_attr *kattr,
2065 union bpf_attr __user *uattr)
2066{
2067 return -ENOTSUPP;
2068}
2069
2070const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2071 .get_func_proto = raw_tp_prog_func_proto,
2072 .is_valid_access = raw_tp_prog_is_valid_access,
2073};
2074
2075const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2076#ifdef CONFIG_NET
2077 .test_run = bpf_prog_test_run_raw_tp,
2078#endif
2079};
2080
2081const struct bpf_verifier_ops tracing_verifier_ops = {
2082 .get_func_proto = tracing_prog_func_proto,
2083 .is_valid_access = tracing_prog_is_valid_access,
2084};
2085
2086const struct bpf_prog_ops tracing_prog_ops = {
2087 .test_run = bpf_prog_test_run_tracing,
2088};
2089
2090static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2091 enum bpf_access_type type,
2092 const struct bpf_prog *prog,
2093 struct bpf_insn_access_aux *info)
2094{
2095 if (off == 0) {
2096 if (size != sizeof(u64) || type != BPF_READ)
2097 return false;
2098 info->reg_type = PTR_TO_TP_BUFFER;
2099 }
2100 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2101}
2102
2103const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2104 .get_func_proto = raw_tp_prog_func_proto,
2105 .is_valid_access = raw_tp_writable_prog_is_valid_access,
2106};
2107
2108const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2109};
2110
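/*
 * Context access rules for perf_event programs: reads only, and apart
 * from sample_period and addr (which also allow narrow loads), every
 * field must be read with a long-sized access so the pt_regs rewrite
 * in pe_prog_convert_ctx_access() stays valid.
 */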
2111static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2112 const struct bpf_prog *prog,
2113 struct bpf_insn_access_aux *info)
2114{
2115 const int size_u64 = sizeof(u64);
2116
2117 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2118 return false;
2119 if (type != BPF_READ)
2120 return false;
2121 if (off % size != 0) {
2122 if (sizeof(unsigned long) != 4)
2123 return false;
2124 if (size != 8)
2125 return false;
2126 if (off % size != 4)
2127 return false;
2128 }
2129
2130 switch (off) {
2131 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2132 bpf_ctx_record_field_size(info, size_u64);
2133 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2134 return false;
2135 break;
2136 case bpf_ctx_range(struct bpf_perf_event_data, addr):
2137 bpf_ctx_record_field_size(info, size_u64);
2138 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2139 return false;
2140 break;
2141 default:
2142 if (size != sizeof(long))
2143 return false;
2144 }
2145
2146 return true;
2147}
2148
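/*
 * Rewrite loads from struct bpf_perf_event_data into two loads through
 * the kernel-side struct bpf_perf_event_data_kern: first the data/regs
 * pointer, then the requested field (sample period, addr, or the
 * pt_regs member at the same offset).
 */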
2149static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2150 const struct bpf_insn *si,
2151 struct bpf_insn *insn_buf,
2152 struct bpf_prog *prog, u32 *target_size)
2153{
2154 struct bpf_insn *insn = insn_buf;
2155
2156 switch (si->off) {
2157 case offsetof(struct bpf_perf_event_data, sample_period):
2158 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2159 data), si->dst_reg, si->src_reg,
2160 offsetof(struct bpf_perf_event_data_kern, data));
2161 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2162 bpf_target_off(struct perf_sample_data, period, 8,
2163 target_size));
2164 break;
2165 case offsetof(struct bpf_perf_event_data, addr):
2166 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2167 data), si->dst_reg, si->src_reg,
2168 offsetof(struct bpf_perf_event_data_kern, data));
2169 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2170 bpf_target_off(struct perf_sample_data, addr, 8,
2171 target_size));
2172 break;
2173 default:
2174 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2175 regs), si->dst_reg, si->src_reg,
2176 offsetof(struct bpf_perf_event_data_kern, regs));
2177 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2178 si->off);
2179 break;
2180 }
2181
2182 return insn - insn_buf;
2183}
2184
2185const struct bpf_verifier_ops perf_event_verifier_ops = {
2186 .get_func_proto = pe_prog_func_proto,
2187 .is_valid_access = pe_prog_is_valid_access,
2188 .convert_ctx_access = pe_prog_convert_ctx_access,
2189};
2190
2191const struct bpf_prog_ops perf_event_prog_ops = {
2192};
2193
2194static DEFINE_MUTEX(bpf_event_mutex);
2195
2196#define BPF_TRACE_MAX_PROGS 64
2197
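/*
 * Attach @prog to the prog_array of the event's tp_event.  A perf event
 * carries at most one BPF program, and a tp_event accepts at most
 * BPF_TRACE_MAX_PROGS programs in total.
 */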
2198int perf_event_attach_bpf_prog(struct perf_event *event,
2199 struct bpf_prog *prog,
2200 u64 bpf_cookie)
2201{
2202 struct bpf_prog_array *old_array;
2203 struct bpf_prog_array *new_array;
2204 int ret = -EEXIST;
2205
2206 /*
2207	 * Kprobe override only works if the probe is on the function entry,
2208	 * and only if the target function is on the error-injection opt-in list.
2209 */
2210 if (prog->kprobe_override &&
2211 (!trace_kprobe_on_func_entry(event->tp_event) ||
2212 !trace_kprobe_error_injectable(event->tp_event)))
2213 return -EINVAL;
2214
2215 mutex_lock(&bpf_event_mutex);
2216
2217 if (event->prog)
2218 goto unlock;
2219
2220 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2221 if (old_array &&
2222 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2223 ret = -E2BIG;
2224 goto unlock;
2225 }
2226
2227 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2228 if (ret < 0)
2229 goto unlock;
2230
2231 /* set the new array to event->tp_event and set event->prog */
2232 event->prog = prog;
2233 event->bpf_cookie = bpf_cookie;
2234 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2235 bpf_prog_array_free_sleepable(old_array);
2236
2237unlock:
2238 mutex_unlock(&bpf_event_mutex);
2239 return ret;
2240}
2241
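/*
 * Detach the program attached to @event: drop it from the tp_event's
 * prog_array and release the event's reference on it.
 */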
2242void perf_event_detach_bpf_prog(struct perf_event *event)
2243{
2244 struct bpf_prog_array *old_array;
2245 struct bpf_prog_array *new_array;
2246 int ret;
2247
2248 mutex_lock(&bpf_event_mutex);
2249
2250 if (!event->prog)
2251 goto unlock;
2252
2253 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2254 if (!old_array)
2255 goto put;
2256
2257 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2258 if (ret < 0) {
2259 bpf_prog_array_delete_safe(old_array, event->prog);
2260 } else {
2261 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2262 bpf_prog_array_free_sleepable(old_array);
2263 }
2264
2265put:
2266 /*
2267 * It could be that the bpf_prog is not sleepable (and will be freed
2268 * via normal RCU), but is called from a point that supports sleepable
2269 * programs and uses tasks-trace-RCU.
2270 */
2271 synchronize_rcu_tasks_trace();
2272
2273 bpf_prog_put(event->prog);
2274 event->prog = NULL;
2275
2276unlock:
2277 mutex_unlock(&bpf_event_mutex);
2278}
2279
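/*
 * Copy the ids of the BPF programs attached to a tracepoint perf event
 * out to user space (the PERF_EVENT_IOC_QUERY_BPF ioctl).  Requires
 * perfmon_capable().
 */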
2280int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2281{
2282 struct perf_event_query_bpf __user *uquery = info;
2283 struct perf_event_query_bpf query = {};
2284 struct bpf_prog_array *progs;
2285 u32 *ids, prog_cnt, ids_len;
2286 int ret;
2287
2288 if (!perfmon_capable())
2289 return -EPERM;
2290 if (event->attr.type != PERF_TYPE_TRACEPOINT)
2291 return -EINVAL;
2292 if (copy_from_user(&query, uquery, sizeof(query)))
2293 return -EFAULT;
2294
2295 ids_len = query.ids_len;
2296 if (ids_len > BPF_TRACE_MAX_PROGS)
2297 return -E2BIG;
2298 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2299 if (!ids)
2300 return -ENOMEM;
2301 /*
2302	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which is
2303	 * exactly what we want when the user only wants to read uquery->prog_cnt.
2304	 * There is no need to check for it here since that case is handled
2305	 * gracefully in bpf_prog_array_copy_info.
2306 */
2307
2308 mutex_lock(&bpf_event_mutex);
2309 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2310 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2311 mutex_unlock(&bpf_event_mutex);
2312
2313 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2314 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2315 ret = -EFAULT;
2316
2317 kfree(ids);
2318 return ret;
2319}
2320
2321extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2322extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2323
2324struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2325{
2326 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2327
2328 for (; btp < __stop__bpf_raw_tp; btp++) {
2329 if (!strcmp(btp->tp->name, name))
2330 return btp;
2331 }
2332
2333 return bpf_get_raw_tracepoint_module(name);
2334}
2335
2336void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2337{
2338 struct module *mod;
2339
2340 preempt_disable();
2341 mod = __module_address((unsigned long)btp);
2342 module_put(mod);
2343 preempt_enable();
2344}
2345
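/*
 * Common body of the bpf_trace_run<N>() stubs below: run the raw
 * tracepoint program with per-program recursion protection and the
 * link's cookie exposed through the run context.
 */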
2346static __always_inline
2347void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
2348{
2349 struct bpf_prog *prog = link->link.prog;
2350 struct bpf_run_ctx *old_run_ctx;
2351 struct bpf_trace_run_ctx run_ctx;
2352
2353 cant_sleep();
2354 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2355 bpf_prog_inc_misses_counter(prog);
2356 goto out;
2357 }
2358
2359 run_ctx.bpf_cookie = link->cookie;
2360 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2361
2362 rcu_read_lock();
2363 (void) bpf_prog_run(prog, args);
2364 rcu_read_unlock();
2365
2366 bpf_reset_run_ctx(old_run_ctx);
2367out:
2368 this_cpu_dec(*(prog->active));
2369}
2370
2371#define UNPACK(...) __VA_ARGS__
2372#define REPEAT_1(FN, DL, X, ...) FN(X)
2373#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2374#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2375#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2376#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2377#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2378#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2379#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2380#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2381#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2382#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2383#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2384#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
2385
2386#define SARG(X) u64 arg##X
2387#define COPY(X) args[X] = arg##X
2388
2389#define __DL_COM (,)
2390#define __DL_SEM (;)
2391
2392#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2393
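/*
 * BPF_TRACE_DEFN_x(N) stamps out one exported bpf_trace_run<N>() stub
 * per argument count.  As an illustration, BPF_TRACE_DEFN_x(2) expands
 * (roughly, after preprocessing) to:
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */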
2394#define BPF_TRACE_DEFN_x(x) \
2395 void bpf_trace_run##x(struct bpf_raw_tp_link *link, \
2396 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
2397 { \
2398 u64 args[x]; \
2399 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
2400 __bpf_trace_run(link, args); \
2401 } \
2402 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2403BPF_TRACE_DEFN_x(1);
2404BPF_TRACE_DEFN_x(2);
2405BPF_TRACE_DEFN_x(3);
2406BPF_TRACE_DEFN_x(4);
2407BPF_TRACE_DEFN_x(5);
2408BPF_TRACE_DEFN_x(6);
2409BPF_TRACE_DEFN_x(7);
2410BPF_TRACE_DEFN_x(8);
2411BPF_TRACE_DEFN_x(9);
2412BPF_TRACE_DEFN_x(10);
2413BPF_TRACE_DEFN_x(11);
2414BPF_TRACE_DEFN_x(12);
2415
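/*
 * Hook a raw tracepoint link up to its tracepoint: check that the
 * program only touches arguments (and writable bytes) the tracepoint
 * actually provides, then register btp->bpf_func as a probe with @link
 * as its data.
 */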
2416int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2417{
2418 struct tracepoint *tp = btp->tp;
2419 struct bpf_prog *prog = link->link.prog;
2420
2421 /*
2422 * check that program doesn't access arguments beyond what's
2423 * available in this tracepoint
2424 */
2425 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2426 return -EINVAL;
2427
2428 if (prog->aux->max_tp_access > btp->writable_size)
2429 return -EINVAL;
2430
2431 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2432}
2433
2434int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2435{
2436 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2437}
2438
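/*
 * Report what a perf event's BPF program is attached to (tracepoint,
 * kprobe or uprobe) together with the program id and probe location,
 * for user space queries of perf-event-based attachments.
 */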
2439int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2440 u32 *fd_type, const char **buf,
2441 u64 *probe_offset, u64 *probe_addr,
2442 unsigned long *missed)
2443{
2444 bool is_tracepoint, is_syscall_tp;
2445 struct bpf_prog *prog;
2446 int flags, err = 0;
2447
2448 prog = event->prog;
2449 if (!prog)
2450 return -ENOENT;
2451
2452 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2453 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2454 return -EOPNOTSUPP;
2455
2456 *prog_id = prog->aux->id;
2457 flags = event->tp_event->flags;
2458 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2459 is_syscall_tp = is_syscall_trace_event(event->tp_event);
2460
2461 if (is_tracepoint || is_syscall_tp) {
2462 *buf = is_tracepoint ? event->tp_event->tp->name
2463 : event->tp_event->name;
2464		/* fd_type, probe_offset and probe_addr may be NULL for tracepoints */
2465 if (fd_type)
2466 *fd_type = BPF_FD_TYPE_TRACEPOINT;
2467 if (probe_offset)
2468 *probe_offset = 0x0;
2469 if (probe_addr)
2470 *probe_addr = 0x0;
2471 } else {
2472 /* kprobe/uprobe */
2473 err = -EOPNOTSUPP;
2474#ifdef CONFIG_KPROBE_EVENTS
2475 if (flags & TRACE_EVENT_FL_KPROBE)
2476 err = bpf_get_kprobe_info(event, fd_type, buf,
2477 probe_offset, probe_addr, missed,
2478 event->attr.type == PERF_TYPE_TRACEPOINT);
2479#endif
2480#ifdef CONFIG_UPROBE_EVENTS
2481 if (flags & TRACE_EVENT_FL_UPROBE)
2482 err = bpf_get_uprobe_info(event, fd_type, buf,
2483 probe_offset, probe_addr,
2484 event->attr.type == PERF_TYPE_TRACEPOINT);
2485#endif
2486 }
2487
2488 return err;
2489}
2490
2491static int __init send_signal_irq_work_init(void)
2492{
2493 int cpu;
2494 struct send_signal_irq_work *work;
2495
2496 for_each_possible_cpu(cpu) {
2497 work = per_cpu_ptr(&send_signal_work, cpu);
2498 init_irq_work(&work->irq_work, do_bpf_send_signal);
2499 }
2500 return 0;
2501}
2502
2503subsys_initcall(send_signal_irq_work_init);
2504
2505#ifdef CONFIG_MODULES
2506static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2507 void *module)
2508{
2509 struct bpf_trace_module *btm, *tmp;
2510 struct module *mod = module;
2511 int ret = 0;
2512
2513 if (mod->num_bpf_raw_events == 0 ||
2514 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2515 goto out;
2516
2517 mutex_lock(&bpf_module_mutex);
2518
2519 switch (op) {
2520 case MODULE_STATE_COMING:
2521 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2522 if (btm) {
2523 btm->module = module;
2524 list_add(&btm->list, &bpf_trace_modules);
2525 } else {
2526 ret = -ENOMEM;
2527 }
2528 break;
2529 case MODULE_STATE_GOING:
2530 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2531 if (btm->module == module) {
2532 list_del(&btm->list);
2533 kfree(btm);
2534 break;
2535 }
2536 }
2537 break;
2538 }
2539
2540 mutex_unlock(&bpf_module_mutex);
2541
2542out:
2543 return notifier_from_errno(ret);
2544}
2545
2546static struct notifier_block bpf_module_nb = {
2547 .notifier_call = bpf_event_notify,
2548};
2549
2550static int __init bpf_event_init(void)
2551{
2552 register_module_notifier(&bpf_module_nb);
2553 return 0;
2554}
2555
2556fs_initcall(bpf_event_init);
2557#endif /* CONFIG_MODULES */
2558
2559struct bpf_session_run_ctx {
2560 struct bpf_run_ctx run_ctx;
2561 bool is_return;
2562 void *data;
2563};
2564
2565#ifdef CONFIG_FPROBE
2566struct bpf_kprobe_multi_link {
2567 struct bpf_link link;
2568 struct fprobe fp;
2569 unsigned long *addrs;
2570 u64 *cookies;
2571 u32 cnt;
2572 u32 mods_cnt;
2573 struct module **mods;
2574 u32 flags;
2575};
2576
2577struct bpf_kprobe_multi_run_ctx {
2578 struct bpf_session_run_ctx session_ctx;
2579 struct bpf_kprobe_multi_link *link;
2580 unsigned long entry_ip;
2581};
2582
2583struct user_syms {
2584 const char **syms;
2585 char *buf;
2586};
2587
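/*
 * Copy an array of user-space symbol name pointers into the kernel:
 * all strings land in one contiguous buffer (us->buf), with us->syms[]
 * pointing at the individual NUL-terminated names inside it.
 */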
2588static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2589{
2590 unsigned long __user usymbol;
2591 const char **syms = NULL;
2592 char *buf = NULL, *p;
2593 int err = -ENOMEM;
2594 unsigned int i;
2595
2596 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2597 if (!syms)
2598 goto error;
2599
2600 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2601 if (!buf)
2602 goto error;
2603
2604 for (p = buf, i = 0; i < cnt; i++) {
2605 if (__get_user(usymbol, usyms + i)) {
2606 err = -EFAULT;
2607 goto error;
2608 }
2609 err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2610 if (err == KSYM_NAME_LEN)
2611 err = -E2BIG;
2612 if (err < 0)
2613 goto error;
2614 syms[i] = p;
2615 p += err + 1;
2616 }
2617
2618 us->syms = syms;
2619 us->buf = buf;
2620 return 0;
2621
2622error:
2623 if (err) {
2624 kvfree(syms);
2625 kvfree(buf);
2626 }
2627 return err;
2628}
2629
2630static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2631{
2632 u32 i;
2633
2634 for (i = 0; i < cnt; i++)
2635 module_put(mods[i]);
2636}
2637
2638static void free_user_syms(struct user_syms *us)
2639{
2640 kvfree(us->syms);
2641 kvfree(us->buf);
2642}
2643
2644static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2645{
2646 struct bpf_kprobe_multi_link *kmulti_link;
2647
2648 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2649 unregister_fprobe(&kmulti_link->fp);
2650 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2651}
2652
2653static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2654{
2655 struct bpf_kprobe_multi_link *kmulti_link;
2656
2657 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2658 kvfree(kmulti_link->addrs);
2659 kvfree(kmulti_link->cookies);
2660 kfree(kmulti_link->mods);
2661 kfree(kmulti_link);
2662}
2663
2664static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2665 struct bpf_link_info *info)
2666{
2667 u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2668 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2669 struct bpf_kprobe_multi_link *kmulti_link;
2670 u32 ucount = info->kprobe_multi.count;
2671 int err = 0, i;
2672
2673 if (!uaddrs ^ !ucount)
2674 return -EINVAL;
2675 if (ucookies && !ucount)
2676 return -EINVAL;
2677
2678 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2679 info->kprobe_multi.count = kmulti_link->cnt;
2680 info->kprobe_multi.flags = kmulti_link->flags;
2681 info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2682
2683 if (!uaddrs)
2684 return 0;
2685 if (ucount < kmulti_link->cnt)
2686 err = -ENOSPC;
2687 else
2688 ucount = kmulti_link->cnt;
2689
2690 if (ucookies) {
2691 if (kmulti_link->cookies) {
2692 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2693 return -EFAULT;
2694 } else {
2695 for (i = 0; i < ucount; i++) {
2696 if (put_user(0, ucookies + i))
2697 return -EFAULT;
2698 }
2699 }
2700 }
2701
2702 if (kallsyms_show_value(current_cred())) {
2703 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2704 return -EFAULT;
2705 } else {
2706 for (i = 0; i < ucount; i++) {
2707 if (put_user(0, uaddrs + i))
2708 return -EFAULT;
2709 }
2710 }
2711 return err;
2712}
2713
2714static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2715 .release = bpf_kprobe_multi_link_release,
2716 .dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2717 .fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2718};
2719
2720static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2721{
2722 const struct bpf_kprobe_multi_link *link = priv;
2723 unsigned long *addr_a = a, *addr_b = b;
2724 u64 *cookie_a, *cookie_b;
2725
2726 cookie_a = link->cookies + (addr_a - link->addrs);
2727 cookie_b = link->cookies + (addr_b - link->addrs);
2728
2729 /* swap addr_a/addr_b and cookie_a/cookie_b values */
2730 swap(*addr_a, *addr_b);
2731 swap(*cookie_a, *cookie_b);
2732}
2733
2734static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2735{
2736 const unsigned long *addr_a = a, *addr_b = b;
2737
2738 if (*addr_a == *addr_b)
2739 return 0;
2740 return *addr_a < *addr_b ? -1 : 1;
2741}
2742
2743static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2744{
2745 return bpf_kprobe_multi_addrs_cmp(a, b);
2746}
2747
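/*
 * bpf_get_attach_cookie() for kprobe_multi links: binary-search the
 * sorted addrs[] for the current entry ip and return the cookie stored
 * at the same index (addrs and cookies are sorted together at attach
 * time).
 */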
2748static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2749{
2750 struct bpf_kprobe_multi_run_ctx *run_ctx;
2751 struct bpf_kprobe_multi_link *link;
2752 u64 *cookie, entry_ip;
2753 unsigned long *addr;
2754
2755 if (WARN_ON_ONCE(!ctx))
2756 return 0;
2757 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2758 session_ctx.run_ctx);
2759 link = run_ctx->link;
2760 if (!link->cookies)
2761 return 0;
2762 entry_ip = run_ctx->entry_ip;
2763 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2764 bpf_kprobe_multi_addrs_cmp);
2765 if (!addr)
2766 return 0;
2767 cookie = link->cookies + (addr - link->addrs);
2768 return *cookie;
2769}
2770
2771static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2772{
2773 struct bpf_kprobe_multi_run_ctx *run_ctx;
2774
2775 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2776 session_ctx.run_ctx);
2777 return run_ctx->entry_ip;
2778}
2779
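/*
 * Run the kprobe_multi program for one hit.  bpf_prog_active provides
 * recursion protection; the session run context carries the entry ip,
 * the entry/return direction and the per-session data used by the
 * cookie/entry_ip helpers and the bpf_session_* kfuncs.
 */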
2780static int
2781kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2782 unsigned long entry_ip, struct pt_regs *regs,
2783 bool is_return, void *data)
2784{
2785 struct bpf_kprobe_multi_run_ctx run_ctx = {
2786 .session_ctx = {
2787 .is_return = is_return,
2788 .data = data,
2789 },
2790 .link = link,
2791 .entry_ip = entry_ip,
2792 };
2793 struct bpf_run_ctx *old_run_ctx;
2794 int err;
2795
2796 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2797 bpf_prog_inc_misses_counter(link->link.prog);
2798 err = 0;
2799 goto out;
2800 }
2801
2802 migrate_disable();
2803 rcu_read_lock();
2804 old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
2805 err = bpf_prog_run(link->link.prog, regs);
2806 bpf_reset_run_ctx(old_run_ctx);
2807 rcu_read_unlock();
2808 migrate_enable();
2809
2810 out:
2811 __this_cpu_dec(bpf_prog_active);
2812 return err;
2813}
2814
2815static int
2816kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2817 unsigned long ret_ip, struct pt_regs *regs,
2818 void *data)
2819{
2820 struct bpf_kprobe_multi_link *link;
2821 int err;
2822
2823 link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2824 err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data);
2825 return is_kprobe_session(link->link.prog) ? err : 0;
2826}
2827
2828static void
2829kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2830 unsigned long ret_ip, struct pt_regs *regs,
2831 void *data)
2832{
2833 struct bpf_kprobe_multi_link *link;
2834
2835 link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2836 kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data);
2837}
2838
2839static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2840{
2841 const char **str_a = (const char **) a;
2842 const char **str_b = (const char **) b;
2843
2844 return strcmp(*str_a, *str_b);
2845}
2846
2847struct multi_symbols_sort {
2848 const char **funcs;
2849 u64 *cookies;
2850};
2851
2852static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2853{
2854 const struct multi_symbols_sort *data = priv;
2855 const char **name_a = a, **name_b = b;
2856
2857 swap(*name_a, *name_b);
2858
2859	/* If cookies were supplied, swap the corresponding cookies as well. */
2860 if (data->cookies) {
2861 u64 *cookie_a, *cookie_b;
2862
2863 cookie_a = data->cookies + (name_a - data->funcs);
2864 cookie_b = data->cookies + (name_b - data->funcs);
2865 swap(*cookie_a, *cookie_b);
2866 }
2867}
2868
2869struct modules_array {
2870 struct module **mods;
2871 int mods_cnt;
2872 int mods_cap;
2873};
2874
2875static int add_module(struct modules_array *arr, struct module *mod)
2876{
2877 struct module **mods;
2878
2879 if (arr->mods_cnt == arr->mods_cap) {
2880 arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2881 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2882 if (!mods)
2883 return -ENOMEM;
2884 arr->mods = mods;
2885 }
2886
2887 arr->mods[arr->mods_cnt] = mod;
2888 arr->mods_cnt++;
2889 return 0;
2890}
2891
2892static bool has_module(struct modules_array *arr, struct module *mod)
2893{
2894 int i;
2895
2896 for (i = arr->mods_cnt - 1; i >= 0; i--) {
2897 if (arr->mods[i] == mod)
2898 return true;
2899 }
2900 return false;
2901}
2902
2903static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2904{
2905 struct modules_array arr = {};
2906 u32 i, err = 0;
2907
2908 for (i = 0; i < addrs_cnt; i++) {
2909 struct module *mod;
2910
2911 preempt_disable();
2912 mod = __module_address(addrs[i]);
2913		/* Either no module or it's already stored */
2914 if (!mod || has_module(&arr, mod)) {
2915 preempt_enable();
2916 continue;
2917 }
2918 if (!try_module_get(mod))
2919 err = -EINVAL;
2920 preempt_enable();
2921 if (err)
2922 break;
2923 err = add_module(&arr, mod);
2924 if (err) {
2925 module_put(mod);
2926 break;
2927 }
2928 }
2929
2930 /* We return either err < 0 in case of error, ... */
2931 if (err) {
2932 kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2933 kfree(arr.mods);
2934 return err;
2935 }
2936
2937 /* or number of modules found if everything is ok. */
2938 *mods = arr.mods;
2939 return arr.mods_cnt;
2940}
2941
2942static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2943{
2944 u32 i;
2945
2946 for (i = 0; i < cnt; i++) {
2947 if (!within_error_injection_list(addrs[i]))
2948 return -EINVAL;
2949 }
2950 return 0;
2951}
2952
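/*
 * Attach a kprobe_multi/kprobe_session program: take either an array of
 * addresses or of symbol names (never both) plus optional per-probe
 * cookies, resolve symbols to addresses, sort addresses together with
 * their cookies, pin the modules containing them and register a single
 * fprobe covering all resolved ips.
 */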
2953int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2954{
2955 struct bpf_kprobe_multi_link *link = NULL;
2956 struct bpf_link_primer link_primer;
2957 void __user *ucookies;
2958 unsigned long *addrs;
2959 u32 flags, cnt, size;
2960 void __user *uaddrs;
2961 u64 *cookies = NULL;
2962 void __user *usyms;
2963 int err;
2964
2965 /* no support for 32bit archs yet */
2966 if (sizeof(u64) != sizeof(void *))
2967 return -EOPNOTSUPP;
2968
2969 if (!is_kprobe_multi(prog))
2970 return -EINVAL;
2971
2972 flags = attr->link_create.kprobe_multi.flags;
2973 if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2974 return -EINVAL;
2975
2976 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2977 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2978 if (!!uaddrs == !!usyms)
2979 return -EINVAL;
2980
2981 cnt = attr->link_create.kprobe_multi.cnt;
2982 if (!cnt)
2983 return -EINVAL;
2984 if (cnt > MAX_KPROBE_MULTI_CNT)
2985 return -E2BIG;
2986
2987 size = cnt * sizeof(*addrs);
2988 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2989 if (!addrs)
2990 return -ENOMEM;
2991
2992 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2993 if (ucookies) {
2994 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2995 if (!cookies) {
2996 err = -ENOMEM;
2997 goto error;
2998 }
2999 if (copy_from_user(cookies, ucookies, size)) {
3000 err = -EFAULT;
3001 goto error;
3002 }
3003 }
3004
3005 if (uaddrs) {
3006 if (copy_from_user(addrs, uaddrs, size)) {
3007 err = -EFAULT;
3008 goto error;
3009 }
3010 } else {
3011 struct multi_symbols_sort data = {
3012 .cookies = cookies,
3013 };
3014 struct user_syms us;
3015
3016 err = copy_user_syms(&us, usyms, cnt);
3017 if (err)
3018 goto error;
3019
3020 if (cookies)
3021 data.funcs = us.syms;
3022
3023 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
3024 symbols_swap_r, &data);
3025
3026 err = ftrace_lookup_symbols(us.syms, cnt, addrs);
3027 free_user_syms(&us);
3028 if (err)
3029 goto error;
3030 }
3031
3032 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
3033 err = -EINVAL;
3034 goto error;
3035 }
3036
3037 link = kzalloc(sizeof(*link), GFP_KERNEL);
3038 if (!link) {
3039 err = -ENOMEM;
3040 goto error;
3041 }
3042
3043 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
3044 &bpf_kprobe_multi_link_lops, prog);
3045
3046 err = bpf_link_prime(&link->link, &link_primer);
3047 if (err)
3048 goto error;
3049
3050 if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
3051 link->fp.entry_handler = kprobe_multi_link_handler;
3052 if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
3053 link->fp.exit_handler = kprobe_multi_link_exit_handler;
3054 if (is_kprobe_session(prog))
3055 link->fp.entry_data_size = sizeof(u64);
3056
3057 link->addrs = addrs;
3058 link->cookies = cookies;
3059 link->cnt = cnt;
3060 link->flags = flags;
3061
3062 if (cookies) {
3063 /*
3064		 * Sorting the addresses sorts the cookies along with them
3065		 * (see bpf_kprobe_multi_cookie_swap).  This way the
3066		 * bpf_get_attach_cookie helper can later look up the cookie
3067		 * for a given address.
3068 */
3069 sort_r(addrs, cnt, sizeof(*addrs),
3070 bpf_kprobe_multi_cookie_cmp,
3071 bpf_kprobe_multi_cookie_swap,
3072 link);
3073 }
3074
3075 err = get_modules_for_addrs(&link->mods, addrs, cnt);
3076 if (err < 0) {
3077 bpf_link_cleanup(&link_primer);
3078 return err;
3079 }
3080 link->mods_cnt = err;
3081
3082 err = register_fprobe_ips(&link->fp, addrs, cnt);
3083 if (err) {
3084 kprobe_multi_put_modules(link->mods, link->mods_cnt);
3085 bpf_link_cleanup(&link_primer);
3086 return err;
3087 }
3088
3089 return bpf_link_settle(&link_primer);
3090
3091error:
3092 kfree(link);
3093 kvfree(addrs);
3094 kvfree(cookies);
3095 return err;
3096}
3097#else /* !CONFIG_FPROBE */
3098int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3099{
3100 return -EOPNOTSUPP;
3101}
3102static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3103{
3104 return 0;
3105}
3106static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3107{
3108 return 0;
3109}
3110#endif
3111
3112#ifdef CONFIG_UPROBES
3113struct bpf_uprobe_multi_link;
3114
3115struct bpf_uprobe {
3116 struct bpf_uprobe_multi_link *link;
3117 loff_t offset;
3118 unsigned long ref_ctr_offset;
3119 u64 cookie;
3120 struct uprobe *uprobe;
3121 struct uprobe_consumer consumer;
3122 bool session;
3123};
3124
3125struct bpf_uprobe_multi_link {
3126 struct path path;
3127 struct bpf_link link;
3128 u32 cnt;
3129 u32 flags;
3130 struct bpf_uprobe *uprobes;
3131 struct task_struct *task;
3132};
3133
3134struct bpf_uprobe_multi_run_ctx {
3135 struct bpf_session_run_ctx session_ctx;
3136 unsigned long entry_ip;
3137 struct bpf_uprobe *uprobe;
3138};
3139
3140static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
3141{
3142 u32 i;
3143
3144 for (i = 0; i < cnt; i++)
3145 uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
3146
3147 if (cnt)
3148 uprobe_unregister_sync();
3149}
3150
3151static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3152{
3153 struct bpf_uprobe_multi_link *umulti_link;
3154
3155 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3156 bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt);
3157 if (umulti_link->task)
3158 put_task_struct(umulti_link->task);
3159 path_put(&umulti_link->path);
3160}
3161
3162static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3163{
3164 struct bpf_uprobe_multi_link *umulti_link;
3165
3166 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3167 kvfree(umulti_link->uprobes);
3168 kfree(umulti_link);
3169}
3170
3171static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3172 struct bpf_link_info *info)
3173{
3174 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3175 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3176 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3177 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3178 u32 upath_size = info->uprobe_multi.path_size;
3179 struct bpf_uprobe_multi_link *umulti_link;
3180 u32 ucount = info->uprobe_multi.count;
3181 int err = 0, i;
3182 char *p, *buf;
3183 long left = 0;
3184
3185 if (!upath ^ !upath_size)
3186 return -EINVAL;
3187
3188 if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3189 return -EINVAL;
3190
3191 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3192 info->uprobe_multi.count = umulti_link->cnt;
3193 info->uprobe_multi.flags = umulti_link->flags;
3194 info->uprobe_multi.pid = umulti_link->task ?
3195 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3196
3197 upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX;
3198 buf = kmalloc(upath_size, GFP_KERNEL);
3199 if (!buf)
3200 return -ENOMEM;
3201 p = d_path(&umulti_link->path, buf, upath_size);
3202 if (IS_ERR(p)) {
3203 kfree(buf);
3204 return PTR_ERR(p);
3205 }
3206 upath_size = buf + upath_size - p;
3207
3208 if (upath)
3209 left = copy_to_user(upath, p, upath_size);
3210 kfree(buf);
3211 if (left)
3212 return -EFAULT;
3213 info->uprobe_multi.path_size = upath_size;
3214
3215 if (!uoffsets && !ucookies && !uref_ctr_offsets)
3216 return 0;
3217
3218 if (ucount < umulti_link->cnt)
3219 err = -ENOSPC;
3220 else
3221 ucount = umulti_link->cnt;
3222
3223 for (i = 0; i < ucount; i++) {
3224 if (uoffsets &&
3225 put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3226 return -EFAULT;
3227 if (uref_ctr_offsets &&
3228 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3229 return -EFAULT;
3230 if (ucookies &&
3231 put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3232 return -EFAULT;
3233 }
3234
3235 return err;
3236}
3237
3238static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3239 .release = bpf_uprobe_multi_link_release,
3240 .dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3241 .fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3242};
3243
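/*
 * Run the uprobe_multi program for one hit.  If the link is bound to a
 * task, only hits from that thread group are delivered.  Sleepable
 * programs run under rcu_read_lock_trace(), others under plain RCU.
 */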
3244static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3245 unsigned long entry_ip,
3246 struct pt_regs *regs,
3247 bool is_return, void *data)
3248{
3249 struct bpf_uprobe_multi_link *link = uprobe->link;
3250 struct bpf_uprobe_multi_run_ctx run_ctx = {
3251 .session_ctx = {
3252 .is_return = is_return,
3253 .data = data,
3254 },
3255 .entry_ip = entry_ip,
3256 .uprobe = uprobe,
3257 };
3258 struct bpf_prog *prog = link->link.prog;
3259 bool sleepable = prog->sleepable;
3260 struct bpf_run_ctx *old_run_ctx;
3261 int err;
3262
3263 if (link->task && !same_thread_group(current, link->task))
3264 return 0;
3265
3266 if (sleepable)
3267 rcu_read_lock_trace();
3268 else
3269 rcu_read_lock();
3270
3271 migrate_disable();
3272
3273 old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
3274 err = bpf_prog_run(link->link.prog, regs);
3275 bpf_reset_run_ctx(old_run_ctx);
3276
3277 migrate_enable();
3278
3279 if (sleepable)
3280 rcu_read_unlock_trace();
3281 else
3282 rcu_read_unlock();
3283 return err;
3284}
3285
3286static bool
3287uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
3288{
3289 struct bpf_uprobe *uprobe;
3290
3291 uprobe = container_of(con, struct bpf_uprobe, consumer);
3292 return uprobe->link->task->mm == mm;
3293}
3294
3295static int
3296uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
3297 __u64 *data)
3298{
3299 struct bpf_uprobe *uprobe;
3300 int ret;
3301
3302 uprobe = container_of(con, struct bpf_uprobe, consumer);
3303 ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data);
3304 if (uprobe->session)
3305 return ret ? UPROBE_HANDLER_IGNORE : 0;
3306 return 0;
3307}
3308
3309static int
3310uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
3311 __u64 *data)
3312{
3313 struct bpf_uprobe *uprobe;
3314
3315 uprobe = container_of(con, struct bpf_uprobe, consumer);
3316 uprobe_prog_run(uprobe, func, regs, true, data);
3317 return 0;
3318}
3319
3320static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3321{
3322 struct bpf_uprobe_multi_run_ctx *run_ctx;
3323
3324 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3325 session_ctx.run_ctx);
3326 return run_ctx->entry_ip;
3327}
3328
3329static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3330{
3331 struct bpf_uprobe_multi_run_ctx *run_ctx;
3332
3333 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3334 session_ctx.run_ctx);
3335 return run_ctx->uprobe->cookie;
3336}
3337
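/*
 * Attach a uprobe_multi/uprobe_session program: resolve the target
 * path, optionally pin the target task, copy per-probe offsets (plus
 * optional ref_ctr offsets and cookies) from user space and register
 * one uprobe consumer per entry.
 */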
3338int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3339{
3340 struct bpf_uprobe_multi_link *link = NULL;
3341 unsigned long __user *uref_ctr_offsets;
3342 struct bpf_link_primer link_primer;
3343 struct bpf_uprobe *uprobes = NULL;
3344 struct task_struct *task = NULL;
3345 unsigned long __user *uoffsets;
3346 u64 __user *ucookies;
3347 void __user *upath;
3348 u32 flags, cnt, i;
3349 struct path path;
3350 char *name;
3351 pid_t pid;
3352 int err;
3353
3354 /* no support for 32bit archs yet */
3355 if (sizeof(u64) != sizeof(void *))
3356 return -EOPNOTSUPP;
3357
3358 if (!is_uprobe_multi(prog))
3359 return -EINVAL;
3360
3361 flags = attr->link_create.uprobe_multi.flags;
3362 if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3363 return -EINVAL;
3364
3365 /*
3366 * path, offsets and cnt are mandatory,
3367 * ref_ctr_offsets and cookies are optional
3368 */
3369 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3370 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3371 cnt = attr->link_create.uprobe_multi.cnt;
3372 pid = attr->link_create.uprobe_multi.pid;
3373
3374 if (!upath || !uoffsets || !cnt || pid < 0)
3375 return -EINVAL;
3376 if (cnt > MAX_UPROBE_MULTI_CNT)
3377 return -E2BIG;
3378
3379 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3380 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3381
3382 name = strndup_user(upath, PATH_MAX);
3383 if (IS_ERR(name)) {
3384 err = PTR_ERR(name);
3385 return err;
3386 }
3387
3388 err = kern_path(name, LOOKUP_FOLLOW, &path);
3389 kfree(name);
3390 if (err)
3391 return err;
3392
3393 if (!d_is_reg(path.dentry)) {
3394 err = -EBADF;
3395 goto error_path_put;
3396 }
3397
3398 if (pid) {
3399 task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
3400 if (!task) {
3401 err = -ESRCH;
3402 goto error_path_put;
3403 }
3404 }
3405
3406 err = -ENOMEM;
3407
3408 link = kzalloc(sizeof(*link), GFP_KERNEL);
3409 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3410
3411 if (!uprobes || !link)
3412 goto error_free;
3413
3414 for (i = 0; i < cnt; i++) {
3415 if (__get_user(uprobes[i].offset, uoffsets + i)) {
3416 err = -EFAULT;
3417 goto error_free;
3418 }
3419 if (uprobes[i].offset < 0) {
3420 err = -EINVAL;
3421 goto error_free;
3422 }
3423 if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3424 err = -EFAULT;
3425 goto error_free;
3426 }
3427 if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3428 err = -EFAULT;
3429 goto error_free;
3430 }
3431
3432 uprobes[i].link = link;
3433
3434 if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
3435 uprobes[i].consumer.handler = uprobe_multi_link_handler;
3436 if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
3437 uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3438 if (is_uprobe_session(prog))
3439 uprobes[i].session = true;
3440 if (pid)
3441 uprobes[i].consumer.filter = uprobe_multi_link_filter;
3442 }
3443
3444 link->cnt = cnt;
3445 link->uprobes = uprobes;
3446 link->path = path;
3447 link->task = task;
3448 link->flags = flags;
3449
3450 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3451 &bpf_uprobe_multi_link_lops, prog);
3452
3453 for (i = 0; i < cnt; i++) {
3454 uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
3455 uprobes[i].offset,
3456 uprobes[i].ref_ctr_offset,
3457 &uprobes[i].consumer);
3458 if (IS_ERR(uprobes[i].uprobe)) {
3459 err = PTR_ERR(uprobes[i].uprobe);
3460 link->cnt = i;
3461 goto error_unregister;
3462 }
3463 }
3464
3465 err = bpf_link_prime(&link->link, &link_primer);
3466 if (err)
3467 goto error_unregister;
3468
3469 return bpf_link_settle(&link_primer);
3470
3471error_unregister:
3472 bpf_uprobe_unregister(uprobes, link->cnt);
3473
3474error_free:
3475 kvfree(uprobes);
3476 kfree(link);
3477 if (task)
3478 put_task_struct(task);
3479error_path_put:
3480 path_put(&path);
3481 return err;
3482}
3483#else /* !CONFIG_UPROBES */
3484int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3485{
3486 return -EOPNOTSUPP;
3487}
3488static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3489{
3490 return 0;
3491}
3492static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3493{
3494 return 0;
3495}
3496#endif /* CONFIG_UPROBES */
3497
3498__bpf_kfunc_start_defs();
3499
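/*
 * Session kfuncs, usable from kprobe.session/uprobe.session programs
 * (enforced by bpf_kprobe_multi_filter() below): is_return tells an
 * entry invocation from a return one, and the cookie points at the
 * per-session scratch data shared between the two.
 */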
3500__bpf_kfunc bool bpf_session_is_return(void)
3501{
3502 struct bpf_session_run_ctx *session_ctx;
3503
3504 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3505 return session_ctx->is_return;
3506}
3507
3508__bpf_kfunc __u64 *bpf_session_cookie(void)
3509{
3510 struct bpf_session_run_ctx *session_ctx;
3511
3512 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3513 return session_ctx->data;
3514}
3515
3516__bpf_kfunc_end_defs();
3517
3518BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
3519BTF_ID_FLAGS(func, bpf_session_is_return)
3520BTF_ID_FLAGS(func, bpf_session_cookie)
3521BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
3522
3523static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
3524{
3525 if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
3526 return 0;
3527
3528 if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
3529 return -EACCES;
3530
3531 return 0;
3532}
3533
3534static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
3535 .owner = THIS_MODULE,
3536 .set = &kprobe_multi_kfunc_set_ids,
3537 .filter = bpf_kprobe_multi_filter,
3538};
3539
3540static int __init bpf_kprobe_multi_kfuncs_init(void)
3541{
3542 return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
3543}
3544
3545late_initcall(bpf_kprobe_multi_kfuncs_init);
3546
3547__bpf_kfunc_start_defs();
3548
3549__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
3550 u64 value)
3551{
3552 if (type != PIDTYPE_PID && type != PIDTYPE_TGID)
3553 return -EINVAL;
3554
3555 return bpf_send_signal_common(sig, type, task, value);
3556}
3557
3558__bpf_kfunc_end_defs();
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 */
5#include <linux/kernel.h>
6#include <linux/types.h>
7#include <linux/slab.h>
8#include <linux/bpf.h>
9#include <linux/bpf_verifier.h>
10#include <linux/bpf_perf_event.h>
11#include <linux/btf.h>
12#include <linux/filter.h>
13#include <linux/uaccess.h>
14#include <linux/ctype.h>
15#include <linux/kprobes.h>
16#include <linux/spinlock.h>
17#include <linux/syscalls.h>
18#include <linux/error-injection.h>
19#include <linux/btf_ids.h>
20#include <linux/bpf_lsm.h>
21#include <linux/fprobe.h>
22#include <linux/bsearch.h>
23#include <linux/sort.h>
24#include <linux/key.h>
25#include <linux/verification.h>
26#include <linux/namei.h>
27#include <linux/fileattr.h>
28
29#include <net/bpf_sk_storage.h>
30
31#include <uapi/linux/bpf.h>
32#include <uapi/linux/btf.h>
33
34#include <asm/tlb.h>
35
36#include "trace_probe.h"
37#include "trace.h"
38
39#define CREATE_TRACE_POINTS
40#include "bpf_trace.h"
41
42#define bpf_event_rcu_dereference(p) \
43 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
44
45#define MAX_UPROBE_MULTI_CNT (1U << 20)
46#define MAX_KPROBE_MULTI_CNT (1U << 20)
47
48#ifdef CONFIG_MODULES
49struct bpf_trace_module {
50 struct module *module;
51 struct list_head list;
52};
53
54static LIST_HEAD(bpf_trace_modules);
55static DEFINE_MUTEX(bpf_module_mutex);
56
57static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
58{
59 struct bpf_raw_event_map *btp, *ret = NULL;
60 struct bpf_trace_module *btm;
61 unsigned int i;
62
63 mutex_lock(&bpf_module_mutex);
64 list_for_each_entry(btm, &bpf_trace_modules, list) {
65 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
66 btp = &btm->module->bpf_raw_events[i];
67 if (!strcmp(btp->tp->name, name)) {
68 if (try_module_get(btm->module))
69 ret = btp;
70 goto out;
71 }
72 }
73 }
74out:
75 mutex_unlock(&bpf_module_mutex);
76 return ret;
77}
78#else
79static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
80{
81 return NULL;
82}
83#endif /* CONFIG_MODULES */
84
85u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
86u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
87
88static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
89 u64 flags, const struct btf **btf,
90 s32 *btf_id);
91static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
92static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
93
94static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
95static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
96
97/**
98 * trace_call_bpf - invoke BPF program
99 * @call: tracepoint event
100 * @ctx: opaque context pointer
101 *
102 * kprobe handlers execute BPF programs via this helper.
103 * Can be used from static tracepoints in the future.
104 *
105 * Return: BPF programs always return an integer which is interpreted by
106 * kprobe handler as:
107 * 0 - return from kprobe (event is filtered out)
108 * 1 - store kprobe event into ring buffer
109 * Other values are reserved and currently alias to 1
110 */
111unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
112{
113 unsigned int ret;
114
115 cant_sleep();
116
117 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
118 /*
119 * since some bpf program is already running on this cpu,
120 * don't call into another bpf program (same or different)
121 * and don't send kprobe event into ring-buffer,
122 * so return zero here
123 */
124 rcu_read_lock();
125 bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
126 rcu_read_unlock();
127 ret = 0;
128 goto out;
129 }
130
131 /*
132 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
133 * to all call sites, we did a bpf_prog_array_valid() there to check
134 * whether call->prog_array is empty or not, which is
135 * a heuristic to speed up execution.
136 *
137 * If bpf_prog_array_valid() fetched prog_array was
138 * non-NULL, we go into trace_call_bpf() and do the actual
139 * proper rcu_dereference() under RCU lock.
140 * If it turns out that prog_array is NULL then, we bail out.
141 * For the opposite, if the bpf_prog_array_valid() fetched pointer
142 * was NULL, you'll skip the prog_array with the risk of missing
143 * out of events when it was updated in between this and the
144 * rcu_dereference() which is accepted risk.
145 */
146 rcu_read_lock();
147 ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
148 ctx, bpf_prog_run);
149 rcu_read_unlock();
150
151 out:
152 __this_cpu_dec(bpf_prog_active);
153
154 return ret;
155}
156
157#ifdef CONFIG_BPF_KPROBE_OVERRIDE
158BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
159{
160 regs_set_return_value(regs, rc);
161 override_function_with_return(regs);
162 return 0;
163}
164
165static const struct bpf_func_proto bpf_override_return_proto = {
166 .func = bpf_override_return,
167 .gpl_only = true,
168 .ret_type = RET_INTEGER,
169 .arg1_type = ARG_PTR_TO_CTX,
170 .arg2_type = ARG_ANYTHING,
171};
172#endif
173
174static __always_inline int
175bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
176{
177 int ret;
178
179 ret = copy_from_user_nofault(dst, unsafe_ptr, size);
180 if (unlikely(ret < 0))
181 memset(dst, 0, size);
182 return ret;
183}
184
185BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
186 const void __user *, unsafe_ptr)
187{
188 return bpf_probe_read_user_common(dst, size, unsafe_ptr);
189}
190
191const struct bpf_func_proto bpf_probe_read_user_proto = {
192 .func = bpf_probe_read_user,
193 .gpl_only = true,
194 .ret_type = RET_INTEGER,
195 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
196 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
197 .arg3_type = ARG_ANYTHING,
198};
199
200static __always_inline int
201bpf_probe_read_user_str_common(void *dst, u32 size,
202 const void __user *unsafe_ptr)
203{
204 int ret;
205
206 /*
207 * NB: We rely on strncpy_from_user() not copying junk past the NUL
208 * terminator into `dst`.
209 *
210 * strncpy_from_user() does long-sized strides in the fast path. If the
211 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
212 * then there could be junk after the NUL in `dst`. If user takes `dst`
213 * and keys a hash map with it, then semantically identical strings can
214 * occupy multiple entries in the map.
215 */
216 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
217 if (unlikely(ret < 0))
218 memset(dst, 0, size);
219 return ret;
220}
221
222BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
223 const void __user *, unsafe_ptr)
224{
225 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
226}
227
228const struct bpf_func_proto bpf_probe_read_user_str_proto = {
229 .func = bpf_probe_read_user_str,
230 .gpl_only = true,
231 .ret_type = RET_INTEGER,
232 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
233 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
234 .arg3_type = ARG_ANYTHING,
235};
236
237BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
238 const void *, unsafe_ptr)
239{
240 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
241}
242
243const struct bpf_func_proto bpf_probe_read_kernel_proto = {
244 .func = bpf_probe_read_kernel,
245 .gpl_only = true,
246 .ret_type = RET_INTEGER,
247 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
248 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
249 .arg3_type = ARG_ANYTHING,
250};
251
252static __always_inline int
253bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
254{
255 int ret;
256
257 /*
258 * The strncpy_from_kernel_nofault() call will likely not fill the
259 * entire buffer, but that's okay in this circumstance as we're probing
260 * arbitrary memory anyway similar to bpf_probe_read_*() and might
261 * as well probe the stack. Thus, memory is explicitly cleared
262 * only in error case, so that improper users ignoring return
263 * code altogether don't copy garbage; otherwise length of string
264 * is returned that can be used for bpf_perf_event_output() et al.
265 */
266 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
267 if (unlikely(ret < 0))
268 memset(dst, 0, size);
269 return ret;
270}
271
272BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
273 const void *, unsafe_ptr)
274{
275 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
276}
277
278const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
279 .func = bpf_probe_read_kernel_str,
280 .gpl_only = true,
281 .ret_type = RET_INTEGER,
282 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
283 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
284 .arg3_type = ARG_ANYTHING,
285};
286
287#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
288BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
289 const void *, unsafe_ptr)
290{
291 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
292 return bpf_probe_read_user_common(dst, size,
293 (__force void __user *)unsafe_ptr);
294 }
295 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
296}
297
298static const struct bpf_func_proto bpf_probe_read_compat_proto = {
299 .func = bpf_probe_read_compat,
300 .gpl_only = true,
301 .ret_type = RET_INTEGER,
302 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
303 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
304 .arg3_type = ARG_ANYTHING,
305};
306
307BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
308 const void *, unsafe_ptr)
309{
310 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
311 return bpf_probe_read_user_str_common(dst, size,
312 (__force void __user *)unsafe_ptr);
313 }
314 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
315}
316
317static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
318 .func = bpf_probe_read_compat_str,
319 .gpl_only = true,
320 .ret_type = RET_INTEGER,
321 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
322 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
323 .arg3_type = ARG_ANYTHING,
324};
325#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
326
327BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
328 u32, size)
329{
330 /*
331	 * Ensure we're in a user context in which it is safe for the helper
332	 * to run; this helper has no business in a kthread.
333 *
334 * access_ok() should prevent writing to non-user memory, but in
335 * some situations (nommu, temporary switch, etc) access_ok() does
336 * not provide enough validation, hence the check on KERNEL_DS.
337 *
338	 * nmi_uaccess_okay() ensures the probe is not run in an interim
339	 * state while the task or mm is being switched. This is specifically
340	 * required to prevent the use of a temporary mm.
341 */
342
343 if (unlikely(in_interrupt() ||
344 current->flags & (PF_KTHREAD | PF_EXITING)))
345 return -EPERM;
346 if (unlikely(!nmi_uaccess_okay()))
347 return -EPERM;
348
349 return copy_to_user_nofault(unsafe_ptr, src, size);
350}
351
352static const struct bpf_func_proto bpf_probe_write_user_proto = {
353 .func = bpf_probe_write_user,
354 .gpl_only = true,
355 .ret_type = RET_INTEGER,
356 .arg1_type = ARG_ANYTHING,
357 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
358 .arg3_type = ARG_CONST_SIZE,
359};
360
361static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
362{
363 if (!capable(CAP_SYS_ADMIN))
364 return NULL;
365
366 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
367 current->comm, task_pid_nr(current));
368
369 return &bpf_probe_write_user_proto;
370}
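/*
 * Example (a minimal sketch, not part of this file): how a BPF program
 * might call bpf_probe_write_user().  The hook and the user_buf argument
 * are illustrative assumptions; loading any such program requires
 * CAP_SYS_ADMIN and triggers the ratelimited warning above.
 *
 *	SEC("kprobe/hypothetical_hook")
 *	int BPF_KPROBE(scrub_user_buf, void *user_buf)
 *	{
 *		const char zeroes[8] = {};
 *
 *		// Returns -EPERM from kthreads, exiting tasks or when
 *		// user memory access is not currently possible.
 *		bpf_probe_write_user(user_buf, zeroes, sizeof(zeroes));
 *		return 0;
 *	}
 */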
371
372#define MAX_TRACE_PRINTK_VARARGS 3
373#define BPF_TRACE_PRINTK_SIZE 1024
374
375BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
376 u64, arg2, u64, arg3)
377{
378 u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
379 struct bpf_bprintf_data data = {
380 .get_bin_args = true,
381 .get_buf = true,
382 };
383 int ret;
384
385 ret = bpf_bprintf_prepare(fmt, fmt_size, args,
386 MAX_TRACE_PRINTK_VARARGS, &data);
387 if (ret < 0)
388 return ret;
389
390 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
391
392 trace_bpf_trace_printk(data.buf);
393
394 bpf_bprintf_cleanup(&data);
395
396 return ret;
397}
398
399static const struct bpf_func_proto bpf_trace_printk_proto = {
400 .func = bpf_trace_printk,
401 .gpl_only = true,
402 .ret_type = RET_INTEGER,
403 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
404 .arg2_type = ARG_CONST_SIZE,
405};
406
407static void __set_printk_clr_event(void)
408{
409 /*
410 * This program might be calling bpf_trace_printk,
411 * so enable the associated bpf_trace/bpf_trace_printk event.
412	 * Repeat this each time, as it is possible a user has
413	 * disabled bpf_trace_printk events. By loading a program that
414	 * calls bpf_trace_printk(), however, the user has expressed
415	 * the intent to see such events.
416 */
417 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
418 pr_warn_ratelimited("could not enable bpf_trace_printk events");
419}
420
421const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
422{
423 __set_printk_clr_event();
424 return &bpf_trace_printk_proto;
425}
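/*
 * Example (a minimal sketch, not part of this file): BPF programs usually
 * reach bpf_trace_printk()/bpf_trace_vprintk() through libbpf's bpf_printk()
 * convenience macro.  The output lands in the bpf_trace_printk trace event
 * enabled above and can be read from the tracefs trace_pipe file.  The
 * attach point below is an assumption.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int log_execve(void *ctx)
 *	{
 *		char comm[16];
 *
 *		bpf_get_current_comm(comm, sizeof(comm));
 *		bpf_printk("execve by %s (pid %d)", comm,
 *			   (int)(bpf_get_current_pid_tgid() >> 32));
 *		return 0;
 *	}
 */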
426
427BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
428 u32, data_len)
429{
430 struct bpf_bprintf_data data = {
431 .get_bin_args = true,
432 .get_buf = true,
433 };
434 int ret, num_args;
435
436 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
437 (data_len && !args))
438 return -EINVAL;
439 num_args = data_len / 8;
440
441 ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
442 if (ret < 0)
443 return ret;
444
445 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
446
447 trace_bpf_trace_printk(data.buf);
448
449 bpf_bprintf_cleanup(&data);
450
451 return ret;
452}
453
454static const struct bpf_func_proto bpf_trace_vprintk_proto = {
455 .func = bpf_trace_vprintk,
456 .gpl_only = true,
457 .ret_type = RET_INTEGER,
458 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
459 .arg2_type = ARG_CONST_SIZE,
460 .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
461 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
462};
463
464const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
465{
466 __set_printk_clr_event();
467 return &bpf_trace_vprintk_proto;
468}
469
470BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
471 const void *, args, u32, data_len)
472{
473 struct bpf_bprintf_data data = {
474 .get_bin_args = true,
475 };
476 int err, num_args;
477
478 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
479 (data_len && !args))
480 return -EINVAL;
481 num_args = data_len / 8;
482
483 err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
484 if (err < 0)
485 return err;
486
487 seq_bprintf(m, fmt, data.bin_args);
488
489 bpf_bprintf_cleanup(&data);
490
491 return seq_has_overflowed(m) ? -EOVERFLOW : 0;
492}
493
494BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
495
496static const struct bpf_func_proto bpf_seq_printf_proto = {
497 .func = bpf_seq_printf,
498 .gpl_only = true,
499 .ret_type = RET_INTEGER,
500 .arg1_type = ARG_PTR_TO_BTF_ID,
501 .arg1_btf_id = &btf_seq_file_ids[0],
502 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
503 .arg3_type = ARG_CONST_SIZE,
504 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
505 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
506};
507
508BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
509{
510 return seq_write(m, data, len) ? -EOVERFLOW : 0;
511}
512
513static const struct bpf_func_proto bpf_seq_write_proto = {
514 .func = bpf_seq_write,
515 .gpl_only = true,
516 .ret_type = RET_INTEGER,
517 .arg1_type = ARG_PTR_TO_BTF_ID,
518 .arg1_btf_id = &btf_seq_file_ids[0],
519 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
520 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
521};
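/*
 * Example (a minimal sketch, not part of this file): bpf_seq_printf() and
 * bpf_seq_write() are only available to BPF_TRACE_ITER programs, which get
 * the seq_file from their iterator context.  The BPF_SEQ_PRINTF() macro and
 * the struct bpf_iter__task layout are libbpf/vmlinux.h assumptions.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (!task)
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "%8d %16s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */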
522
523BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
524 u32, btf_ptr_size, u64, flags)
525{
526 const struct btf *btf;
527 s32 btf_id;
528 int ret;
529
530 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
531 if (ret)
532 return ret;
533
534 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
535}
536
537static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
538 .func = bpf_seq_printf_btf,
539 .gpl_only = true,
540 .ret_type = RET_INTEGER,
541 .arg1_type = ARG_PTR_TO_BTF_ID,
542 .arg1_btf_id = &btf_seq_file_ids[0],
543 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
544 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
545 .arg4_type = ARG_ANYTHING,
546};
547
548static __always_inline int
549get_map_perf_counter(struct bpf_map *map, u64 flags,
550 u64 *value, u64 *enabled, u64 *running)
551{
552 struct bpf_array *array = container_of(map, struct bpf_array, map);
553 unsigned int cpu = smp_processor_id();
554 u64 index = flags & BPF_F_INDEX_MASK;
555 struct bpf_event_entry *ee;
556
557 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
558 return -EINVAL;
559 if (index == BPF_F_CURRENT_CPU)
560 index = cpu;
561 if (unlikely(index >= array->map.max_entries))
562 return -E2BIG;
563
564 ee = READ_ONCE(array->ptrs[index]);
565 if (!ee)
566 return -ENOENT;
567
568 return perf_event_read_local(ee->event, value, enabled, running);
569}
570
571BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
572{
573 u64 value = 0;
574 int err;
575
576 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
577 /*
578	 * This API is awkward since valid counter values in the [-22..-2]
579	 * range are indistinguishable from errors, but that's uapi
580 */
581 if (err)
582 return err;
583 return value;
584}
585
586static const struct bpf_func_proto bpf_perf_event_read_proto = {
587 .func = bpf_perf_event_read,
588 .gpl_only = true,
589 .ret_type = RET_INTEGER,
590 .arg1_type = ARG_CONST_MAP_PTR,
591 .arg2_type = ARG_ANYTHING,
592};
593
594BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
595 struct bpf_perf_event_value *, buf, u32, size)
596{
597 int err = -EINVAL;
598
599 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
600 goto clear;
601 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
602 &buf->running);
603 if (unlikely(err))
604 goto clear;
605 return 0;
606clear:
607 memset(buf, 0, size);
608 return err;
609}
610
611static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
612 .func = bpf_perf_event_read_value,
613 .gpl_only = true,
614 .ret_type = RET_INTEGER,
615 .arg1_type = ARG_CONST_MAP_PTR,
616 .arg2_type = ARG_ANYTHING,
617 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
618 .arg4_type = ARG_CONST_SIZE,
619};
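/*
 * Example (a minimal sketch, not part of this file): reading a counter that
 * user space has opened with perf_event_open() and stored in a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY slot for the current CPU.  The map name and
 * the attach point are assumptions.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(max_entries, 64);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(u32));
 *	} counters SEC(".maps");
 *
 *	SEC("kprobe/hypothetical_hook")
 *	int BPF_KPROBE(read_counter)
 *	{
 *		struct bpf_perf_event_value val = {};
 *
 *		if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					       &val, sizeof(val)))
 *			bpf_printk("counter=%llu enabled=%llu running=%llu",
 *				   val.counter, val.enabled, val.running);
 *		return 0;
 *	}
 */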
620
621static __always_inline u64
622__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
623 u64 flags, struct perf_sample_data *sd)
624{
625 struct bpf_array *array = container_of(map, struct bpf_array, map);
626 unsigned int cpu = smp_processor_id();
627 u64 index = flags & BPF_F_INDEX_MASK;
628 struct bpf_event_entry *ee;
629 struct perf_event *event;
630
631 if (index == BPF_F_CURRENT_CPU)
632 index = cpu;
633 if (unlikely(index >= array->map.max_entries))
634 return -E2BIG;
635
636 ee = READ_ONCE(array->ptrs[index]);
637 if (!ee)
638 return -ENOENT;
639
640 event = ee->event;
641 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
642 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
643 return -EINVAL;
644
645 if (unlikely(event->oncpu != cpu))
646 return -EOPNOTSUPP;
647
648 return perf_event_output(event, sd, regs);
649}
650
651/*
652 * Support executing tracepoints in normal, irq, and nmi context that each call
653 * bpf_perf_event_output
654 */
655struct bpf_trace_sample_data {
656 struct perf_sample_data sds[3];
657};
658
659static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
660static DEFINE_PER_CPU(int, bpf_trace_nest_level);
661BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
662 u64, flags, void *, data, u64, size)
663{
664 struct bpf_trace_sample_data *sds;
665 struct perf_raw_record raw = {
666 .frag = {
667 .size = size,
668 .data = data,
669 },
670 };
671 struct perf_sample_data *sd;
672 int nest_level, err;
673
674 preempt_disable();
675 sds = this_cpu_ptr(&bpf_trace_sds);
676 nest_level = this_cpu_inc_return(bpf_trace_nest_level);
677
678 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
679 err = -EBUSY;
680 goto out;
681 }
682
683 sd = &sds->sds[nest_level - 1];
684
685 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
686 err = -EINVAL;
687 goto out;
688 }
689
690 perf_sample_data_init(sd, 0, 0);
691 perf_sample_save_raw_data(sd, &raw);
692
693 err = __bpf_perf_event_output(regs, map, flags, sd);
694out:
695 this_cpu_dec(bpf_trace_nest_level);
696 preempt_enable();
697 return err;
698}
699
700static const struct bpf_func_proto bpf_perf_event_output_proto = {
701 .func = bpf_perf_event_output,
702 .gpl_only = true,
703 .ret_type = RET_INTEGER,
704 .arg1_type = ARG_PTR_TO_CTX,
705 .arg2_type = ARG_CONST_MAP_PTR,
706 .arg3_type = ARG_ANYTHING,
707 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
708 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
709};
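/*
 * Example (a minimal sketch, not part of this file): the canonical way to
 * push an event from a kprobe into a perf ring buffer on the current CPU.
 * The map and the event layout are assumptions; user space consumes the
 * samples via libbpf's perf_buffer or the perf mmap interface.
 *
 *	struct event {
 *		u32 pid;
 *		char comm[16];
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/hypothetical_hook")
 *	int BPF_KPROBE(emit_event)
 *	{
 *		struct event e = {};
 *
 *		e.pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e.comm, sizeof(e.comm));
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */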
710
711static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
712struct bpf_nested_pt_regs {
713 struct pt_regs regs[3];
714};
715static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
716static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
717
718u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
719 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
720{
721 struct perf_raw_frag frag = {
722 .copy = ctx_copy,
723 .size = ctx_size,
724 .data = ctx,
725 };
726 struct perf_raw_record raw = {
727 .frag = {
728 {
729 .next = ctx_size ? &frag : NULL,
730 },
731 .size = meta_size,
732 .data = meta,
733 },
734 };
735 struct perf_sample_data *sd;
736 struct pt_regs *regs;
737 int nest_level;
738 u64 ret;
739
740 preempt_disable();
741 nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
742
743 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
744 ret = -EBUSY;
745 goto out;
746 }
747 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
748 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
749
750 perf_fetch_caller_regs(regs);
751 perf_sample_data_init(sd, 0, 0);
752 perf_sample_save_raw_data(sd, &raw);
753
754 ret = __bpf_perf_event_output(regs, map, flags, sd);
755out:
756 this_cpu_dec(bpf_event_output_nest_level);
757 preempt_enable();
758 return ret;
759}
760
761BPF_CALL_0(bpf_get_current_task)
762{
763 return (long) current;
764}
765
766const struct bpf_func_proto bpf_get_current_task_proto = {
767 .func = bpf_get_current_task,
768 .gpl_only = true,
769 .ret_type = RET_INTEGER,
770};
771
772BPF_CALL_0(bpf_get_current_task_btf)
773{
774 return (unsigned long) current;
775}
776
777const struct bpf_func_proto bpf_get_current_task_btf_proto = {
778 .func = bpf_get_current_task_btf,
779 .gpl_only = true,
780 .ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
781 .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
782};
783
784BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
785{
786 return (unsigned long) task_pt_regs(task);
787}
788
789BTF_ID_LIST(bpf_task_pt_regs_ids)
790BTF_ID(struct, pt_regs)
791
792const struct bpf_func_proto bpf_task_pt_regs_proto = {
793 .func = bpf_task_pt_regs,
794 .gpl_only = true,
795 .arg1_type = ARG_PTR_TO_BTF_ID,
796 .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
797 .ret_type = RET_PTR_TO_BTF_ID,
798 .ret_btf_id = &bpf_task_pt_regs_ids[0],
799};
800
801BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
802{
803 struct bpf_array *array = container_of(map, struct bpf_array, map);
804 struct cgroup *cgrp;
805
806 if (unlikely(idx >= array->map.max_entries))
807 return -E2BIG;
808
809 cgrp = READ_ONCE(array->ptrs[idx]);
810 if (unlikely(!cgrp))
811 return -EAGAIN;
812
813 return task_under_cgroup_hierarchy(current, cgrp);
814}
815
816static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
817 .func = bpf_current_task_under_cgroup,
818 .gpl_only = false,
819 .ret_type = RET_INTEGER,
820 .arg1_type = ARG_CONST_MAP_PTR,
821 .arg2_type = ARG_ANYTHING,
822};
823
824struct send_signal_irq_work {
825 struct irq_work irq_work;
826 struct task_struct *task;
827 u32 sig;
828 enum pid_type type;
829};
830
831static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
832
833static void do_bpf_send_signal(struct irq_work *entry)
834{
835 struct send_signal_irq_work *work;
836
837 work = container_of(entry, struct send_signal_irq_work, irq_work);
838 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
839 put_task_struct(work->task);
840}
841
842static int bpf_send_signal_common(u32 sig, enum pid_type type)
843{
844 struct send_signal_irq_work *work = NULL;
845
846	/* Similar to bpf_probe_write_user, the task needs to be
847	 * in a sound condition and kernel memory access must be
848	 * permitted in order to send a signal to the current
849	 * task.
850 */
851 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
852 return -EPERM;
853 if (unlikely(!nmi_uaccess_okay()))
854 return -EPERM;
855 /* Task should not be pid=1 to avoid kernel panic. */
856 if (unlikely(is_global_init(current)))
857 return -EPERM;
858
859 if (irqs_disabled()) {
860 /* Do an early check on signal validity. Otherwise,
861 * the error is lost in deferred irq_work.
862 */
863 if (unlikely(!valid_signal(sig)))
864 return -EINVAL;
865
866 work = this_cpu_ptr(&send_signal_work);
867 if (irq_work_is_busy(&work->irq_work))
868 return -EBUSY;
869
870		/* Add the current task, which is the target of the signal,
871		 * to the irq_work. The current task may change by the time
872		 * the queued irq work gets executed.
873 */
874 work->task = get_task_struct(current);
875 work->sig = sig;
876 work->type = type;
877 irq_work_queue(&work->irq_work);
878 return 0;
879 }
880
881 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
882}
883
884BPF_CALL_1(bpf_send_signal, u32, sig)
885{
886 return bpf_send_signal_common(sig, PIDTYPE_TGID);
887}
888
889static const struct bpf_func_proto bpf_send_signal_proto = {
890 .func = bpf_send_signal,
891 .gpl_only = false,
892 .ret_type = RET_INTEGER,
893 .arg1_type = ARG_ANYTHING,
894};
895
896BPF_CALL_1(bpf_send_signal_thread, u32, sig)
897{
898 return bpf_send_signal_common(sig, PIDTYPE_PID);
899}
900
901static const struct bpf_func_proto bpf_send_signal_thread_proto = {
902 .func = bpf_send_signal_thread,
903 .gpl_only = false,
904 .ret_type = RET_INTEGER,
905 .arg1_type = ARG_ANYTHING,
906};
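/*
 * Example (a minimal sketch, not part of this file): delivering a signal to
 * the current task from a tracing program.  bpf_send_signal() targets the
 * whole thread group, bpf_send_signal_thread() only the calling thread; the
 * signal number below (10, SIGUSR1 on most architectures) and the hook are
 * assumptions.
 *
 *	SEC("kprobe/hypothetical_hook")
 *	int BPF_KPROBE(nudge_task)
 *	{
 *		// Fails with -EPERM for kthreads, exiting tasks or init.
 *		bpf_send_signal(10);
 *		return 0;
 *	}
 */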
907
908BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
909{
910 struct path copy;
911 long len;
912 char *p;
913
914 if (!sz)
915 return 0;
916
917 /*
918 * The path pointer is verified as trusted and safe to use,
919	 * but let's double-check it's valid anyway to work around a
920	 * potentially broken verifier.
921 */
922	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
923 if (len < 0)
924 return len;
925
926	p = d_path(&copy, buf, sz);
927 if (IS_ERR(p)) {
928 len = PTR_ERR(p);
929 } else {
930 len = buf + sz - p;
931 memmove(buf, p, len);
932 }
933
934 return len;
935}
936
937BTF_SET_START(btf_allowlist_d_path)
938#ifdef CONFIG_SECURITY
939BTF_ID(func, security_file_permission)
940BTF_ID(func, security_inode_getattr)
941BTF_ID(func, security_file_open)
942#endif
943#ifdef CONFIG_SECURITY_PATH
944BTF_ID(func, security_path_truncate)
945#endif
946BTF_ID(func, vfs_truncate)
947BTF_ID(func, vfs_fallocate)
948BTF_ID(func, dentry_open)
949BTF_ID(func, vfs_getattr)
950BTF_ID(func, filp_close)
951BTF_SET_END(btf_allowlist_d_path)
952
953static bool bpf_d_path_allowed(const struct bpf_prog *prog)
954{
955 if (prog->type == BPF_PROG_TYPE_TRACING &&
956 prog->expected_attach_type == BPF_TRACE_ITER)
957 return true;
958
959 if (prog->type == BPF_PROG_TYPE_LSM)
960 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
961
962 return btf_id_set_contains(&btf_allowlist_d_path,
963 prog->aux->attach_btf_id);
964}
965
966BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
967
968static const struct bpf_func_proto bpf_d_path_proto = {
969 .func = bpf_d_path,
970 .gpl_only = false,
971 .ret_type = RET_INTEGER,
972 .arg1_type = ARG_PTR_TO_BTF_ID,
973 .arg1_btf_id = &bpf_d_path_btf_ids[0],
974 .arg2_type = ARG_PTR_TO_MEM,
975 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
976 .allowed = bpf_d_path_allowed,
977};
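/*
 * Example (a minimal sketch, not part of this file): bpf_d_path() can only
 * be used from iterator programs, sleepable LSM hooks, or fentry/fexit on
 * the functions in the allowlist above.  filp_close is on that list; the
 * BPF_PROG() macro is a libbpf assumption.
 *
 *	SEC("fentry/filp_close")
 *	int BPF_PROG(trace_close, struct file *file)
 *	{
 *		char buf[256];
 *		long n;
 *
 *		n = bpf_d_path(&file->f_path, buf, sizeof(buf));
 *		if (n > 0)
 *			bpf_printk("closing %s", buf);
 *		return 0;
 *	}
 */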
978
979#define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
980 BTF_F_PTR_RAW | BTF_F_ZERO)
981
982static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
983 u64 flags, const struct btf **btf,
984 s32 *btf_id)
985{
986 const struct btf_type *t;
987
988 if (unlikely(flags & ~(BTF_F_ALL)))
989 return -EINVAL;
990
991 if (btf_ptr_size != sizeof(struct btf_ptr))
992 return -EINVAL;
993
994 *btf = bpf_get_btf_vmlinux();
995
996 if (IS_ERR_OR_NULL(*btf))
997 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
998
999 if (ptr->type_id > 0)
1000 *btf_id = ptr->type_id;
1001 else
1002 return -EINVAL;
1003
1004 if (*btf_id > 0)
1005 t = btf_type_by_id(*btf, *btf_id);
1006 if (*btf_id <= 0 || !t)
1007 return -ENOENT;
1008
1009 return 0;
1010}
1011
1012BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1013 u32, btf_ptr_size, u64, flags)
1014{
1015 const struct btf *btf;
1016 s32 btf_id;
1017 int ret;
1018
1019 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1020 if (ret)
1021 return ret;
1022
1023 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1024 flags);
1025}
1026
1027const struct bpf_func_proto bpf_snprintf_btf_proto = {
1028 .func = bpf_snprintf_btf,
1029 .gpl_only = false,
1030 .ret_type = RET_INTEGER,
1031 .arg1_type = ARG_PTR_TO_MEM,
1032 .arg2_type = ARG_CONST_SIZE,
1033 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1034 .arg4_type = ARG_CONST_SIZE,
1035 .arg5_type = ARG_ANYTHING,
1036};
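/*
 * Example (a minimal sketch, not part of this file): pretty-printing a
 * kernel object into a buffer using its BTF type description.  The
 * bpf_core_type_id_kernel() macro comes from libbpf's bpf_core_read.h and,
 * like the attach point, is an assumption about the loader environment.
 *
 *	SEC("fentry/hypothetical_hook")
 *	int BPF_PROG(show_current)
 *	{
 *		struct task_struct *task = bpf_get_current_task_btf();
 *		struct btf_ptr ptr = {};
 *		char out[256];
 *
 *		ptr.ptr = task;
 *		ptr.type_id = bpf_core_type_id_kernel(struct task_struct);
 *		bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr), 0);
 *		return 0;
 *	}
 */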
1037
1038BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1039{
1040 /* This helper call is inlined by verifier. */
1041 return ((u64 *)ctx)[-2];
1042}
1043
1044static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1045 .func = bpf_get_func_ip_tracing,
1046 .gpl_only = true,
1047 .ret_type = RET_INTEGER,
1048 .arg1_type = ARG_PTR_TO_CTX,
1049};
1050
1051#ifdef CONFIG_X86_KERNEL_IBT
1052static unsigned long get_entry_ip(unsigned long fentry_ip)
1053{
1054 u32 instr;
1055
1056 /* Being extra safe in here in case entry ip is on the page-edge. */
1057 if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
1058 return fentry_ip;
1059 if (is_endbr(instr))
1060 fentry_ip -= ENDBR_INSN_SIZE;
1061 return fentry_ip;
1062}
1063#else
1064#define get_entry_ip(fentry_ip) fentry_ip
1065#endif
1066
1067BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1068{
1069 struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1070 struct kprobe *kp;
1071
1072#ifdef CONFIG_UPROBES
1073 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1074 if (run_ctx->is_uprobe)
1075 return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1076#endif
1077
1078 kp = kprobe_running();
1079
1080 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1081 return 0;
1082
1083 return get_entry_ip((uintptr_t)kp->addr);
1084}
1085
1086static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1087 .func = bpf_get_func_ip_kprobe,
1088 .gpl_only = true,
1089 .ret_type = RET_INTEGER,
1090 .arg1_type = ARG_PTR_TO_CTX,
1091};
1092
1093BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1094{
1095 return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1096}
1097
1098static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1099 .func = bpf_get_func_ip_kprobe_multi,
1100 .gpl_only = false,
1101 .ret_type = RET_INTEGER,
1102 .arg1_type = ARG_PTR_TO_CTX,
1103};
1104
1105BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1106{
1107 return bpf_kprobe_multi_cookie(current->bpf_ctx);
1108}
1109
1110static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1111 .func = bpf_get_attach_cookie_kprobe_multi,
1112 .gpl_only = false,
1113 .ret_type = RET_INTEGER,
1114 .arg1_type = ARG_PTR_TO_CTX,
1115};
1116
1117BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1118{
1119 return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1120}
1121
1122static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1123 .func = bpf_get_func_ip_uprobe_multi,
1124 .gpl_only = false,
1125 .ret_type = RET_INTEGER,
1126 .arg1_type = ARG_PTR_TO_CTX,
1127};
1128
1129BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1130{
1131 return bpf_uprobe_multi_cookie(current->bpf_ctx);
1132}
1133
1134static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1135 .func = bpf_get_attach_cookie_uprobe_multi,
1136 .gpl_only = false,
1137 .ret_type = RET_INTEGER,
1138 .arg1_type = ARG_PTR_TO_CTX,
1139};
1140
1141BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1142{
1143 struct bpf_trace_run_ctx *run_ctx;
1144
1145 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1146 return run_ctx->bpf_cookie;
1147}
1148
1149static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1150 .func = bpf_get_attach_cookie_trace,
1151 .gpl_only = false,
1152 .ret_type = RET_INTEGER,
1153 .arg1_type = ARG_PTR_TO_CTX,
1154};
1155
1156BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1157{
1158 return ctx->event->bpf_cookie;
1159}
1160
1161static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1162 .func = bpf_get_attach_cookie_pe,
1163 .gpl_only = false,
1164 .ret_type = RET_INTEGER,
1165 .arg1_type = ARG_PTR_TO_CTX,
1166};
1167
1168BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1169{
1170 struct bpf_trace_run_ctx *run_ctx;
1171
1172 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1173 return run_ctx->bpf_cookie;
1174}
1175
1176static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1177 .func = bpf_get_attach_cookie_tracing,
1178 .gpl_only = false,
1179 .ret_type = RET_INTEGER,
1180 .arg1_type = ARG_PTR_TO_CTX,
1181};
1182
1183BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1184{
1185#ifndef CONFIG_X86
1186 return -ENOENT;
1187#else
1188 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1189 u32 entry_cnt = size / br_entry_size;
1190
1191 entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1192
1193 if (unlikely(flags))
1194 return -EINVAL;
1195
1196 if (!entry_cnt)
1197 return -ENOENT;
1198
1199 return entry_cnt * br_entry_size;
1200#endif
1201}
1202
1203static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1204 .func = bpf_get_branch_snapshot,
1205 .gpl_only = true,
1206 .ret_type = RET_INTEGER,
1207 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1208 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1209};
1210
1211BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1212{
1213 /* This helper call is inlined by verifier. */
1214 u64 nr_args = ((u64 *)ctx)[-1];
1215
1216 if ((u64) n >= nr_args)
1217 return -EINVAL;
1218 *value = ((u64 *)ctx)[n];
1219 return 0;
1220}
1221
1222static const struct bpf_func_proto bpf_get_func_arg_proto = {
1223 .func = get_func_arg,
1224 .ret_type = RET_INTEGER,
1225 .arg1_type = ARG_PTR_TO_CTX,
1226 .arg2_type = ARG_ANYTHING,
1227 .arg3_type = ARG_PTR_TO_LONG,
1228};
1229
1230BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1231{
1232 /* This helper call is inlined by verifier. */
1233 u64 nr_args = ((u64 *)ctx)[-1];
1234
1235 *value = ((u64 *)ctx)[nr_args];
1236 return 0;
1237}
1238
1239static const struct bpf_func_proto bpf_get_func_ret_proto = {
1240 .func = get_func_ret,
1241 .ret_type = RET_INTEGER,
1242 .arg1_type = ARG_PTR_TO_CTX,
1243 .arg2_type = ARG_PTR_TO_LONG,
1244};
1245
1246BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1247{
1248 /* This helper call is inlined by verifier. */
1249 return ((u64 *)ctx)[-1];
1250}
1251
1252static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1253 .func = get_func_arg_cnt,
1254 .ret_type = RET_INTEGER,
1255 .arg1_type = ARG_PTR_TO_CTX,
1256};
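/*
 * Example (a minimal sketch, not part of this file): fexit programs running
 * on a BPF trampoline can inspect the traced function's arguments and return
 * value with the three helpers above.  The attach point is an assumption.
 *
 *	SEC("fexit/vfs_read")
 *	int BPF_PROG(after_vfs_read)
 *	{
 *		u64 nr_args = bpf_get_func_arg_cnt(ctx);
 *		u64 file_ptr = 0, retval = 0;
 *
 *		bpf_get_func_arg(ctx, 0, &file_ptr);	// first argument
 *		bpf_get_func_ret(ctx, &retval);		// return value
 *		bpf_printk("vfs_read: %llu args, ret=%lld",
 *			   nr_args, (long long)retval);
 *		return 0;
 *	}
 */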
1257
1258#ifdef CONFIG_KEYS
1259__bpf_kfunc_start_defs();
1260
1261/**
1262 * bpf_lookup_user_key - lookup a key by its serial
1263 * @serial: key handle serial number
1264 * @flags: lookup-specific flags
1265 *
1266 * Search a key with a given *serial* and the provided *flags*.
1267 * If found, increment the reference count of the key by one, and
1268 * return it in the bpf_key structure.
1269 *
1270 * The bpf_key structure must be passed to bpf_key_put() when done
1271 * with it, so that the key reference count is decremented and the
1272 * bpf_key structure is freed.
1273 *
1274 * Permission checks are deferred to the time the key is used by
1275 * one of the available key-specific kfuncs.
1276 *
1277 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1278 * special keyring (e.g. session keyring), if it doesn't yet exist.
1279 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1280 * for the key construction, and to retrieve uninstantiated keys (keys
1281 * without data attached to them).
1282 *
1283 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1284 * NULL pointer otherwise.
1285 */
1286__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1287{
1288 key_ref_t key_ref;
1289 struct bpf_key *bkey;
1290
1291 if (flags & ~KEY_LOOKUP_ALL)
1292 return NULL;
1293
1294 /*
1295 * Permission check is deferred until the key is used, as the
1296 * intent of the caller is unknown here.
1297 */
1298 key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1299 if (IS_ERR(key_ref))
1300 return NULL;
1301
1302 bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1303 if (!bkey) {
1304 key_put(key_ref_to_ptr(key_ref));
1305 return NULL;
1306 }
1307
1308 bkey->key = key_ref_to_ptr(key_ref);
1309 bkey->has_ref = true;
1310
1311 return bkey;
1312}
1313
1314/**
1315 * bpf_lookup_system_key - lookup a key by a system-defined ID
1316 * @id: key ID
1317 *
1318 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1319 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1320 * attempting to decrement the key reference count on that pointer. The key
1321 * pointer set in such way is currently understood only by
1322 * pointer set in such a way is currently understood only by
1323 *
1324 * Set *id* to one of the values defined in include/linux/verification.h:
1325 * 0 for the primary keyring (immutable keyring of system keys);
1326 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1327 * (where keys can be added only if they are vouched for by existing keys
1328 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1329 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1330 * kernel image and, possibly, the initramfs signature).
1331 *
1332 * Return: a bpf_key pointer with an invalid key pointer set from the
1333 * pre-determined ID on success, a NULL pointer otherwise
1334 */
1335__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1336{
1337 struct bpf_key *bkey;
1338
1339 if (system_keyring_id_check(id) < 0)
1340 return NULL;
1341
1342 bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1343 if (!bkey)
1344 return NULL;
1345
1346 bkey->key = (struct key *)(unsigned long)id;
1347 bkey->has_ref = false;
1348
1349 return bkey;
1350}
1351
1352/**
1353 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1354 * @bkey: bpf_key structure
1355 *
1356 * Decrement the reference count of the key inside *bkey*, if the pointer
1357 * is valid, and free *bkey*.
1358 */
1359__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1360{
1361 if (bkey->has_ref)
1362 key_put(bkey->key);
1363
1364 kfree(bkey);
1365}
1366
1367#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1368/**
1369 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1370 * @data_ptr: data to verify
1371 * @sig_ptr: signature of the data
1372 * @trusted_keyring: keyring with keys trusted for signature verification
1373 *
1374 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1375 * with keys in a keyring referenced by *trusted_keyring*.
1376 *
1377 * Return: 0 on success, a negative value on error.
1378 */
1379__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1380 struct bpf_dynptr_kern *sig_ptr,
1381 struct bpf_key *trusted_keyring)
1382{
1383 const void *data, *sig;
1384 u32 data_len, sig_len;
1385 int ret;
1386
1387 if (trusted_keyring->has_ref) {
1388 /*
1389 * Do the permission check deferred in bpf_lookup_user_key().
1390 * See bpf_lookup_user_key() for more details.
1391 *
1392 * A call to key_task_permission() here would be redundant, as
1393 * it is already done by keyring_search() called by
1394 * find_asymmetric_key().
1395 */
1396 ret = key_validate(trusted_keyring->key);
1397 if (ret < 0)
1398 return ret;
1399 }
1400
1401 data_len = __bpf_dynptr_size(data_ptr);
1402 data = __bpf_dynptr_data(data_ptr, data_len);
1403 sig_len = __bpf_dynptr_size(sig_ptr);
1404 sig = __bpf_dynptr_data(sig_ptr, sig_len);
1405
1406 return verify_pkcs7_signature(data, data_len, sig, sig_len,
1407 trusted_keyring->key,
1408 VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1409 NULL);
1410}
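/*
 * Example (a minimal sketch, not part of this file): verifying a PKCS#7
 * signature from a sleepable LSM program using the kfuncs defined above.
 * The data/sig buffers, their lengths and the keyring serial are assumptions
 * filled in by user space; the kfuncs must be declared with __ksym in the
 * BPF object.
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *	extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
 *					      struct bpf_dynptr *sig_ptr,
 *					      struct bpf_key *trusted_keyring) __ksym;
 *
 *	char data[4096], sig[1024];
 *	__u32 data_len, sig_len, keyring_serial;
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check_sig)
 *	{
 *		struct bpf_dynptr data_ptr, sig_ptr;
 *		struct bpf_key *key;
 *		int ret;
 *
 *		if (data_len > sizeof(data) || sig_len > sizeof(sig))
 *			return 0;
 *		bpf_dynptr_from_mem(data, data_len, 0, &data_ptr);
 *		bpf_dynptr_from_mem(sig, sig_len, 0, &sig_ptr);
 *
 *		key = bpf_lookup_user_key(keyring_serial, 0);
 *		if (!key)
 *			return 0;
 *		ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, key);
 *		bpf_key_put(key);
 *		return ret;
 *	}
 */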
1411#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1412
1413__bpf_kfunc_end_defs();
1414
1415BTF_KFUNCS_START(key_sig_kfunc_set)
1416BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1417BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1418BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1419#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1420BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1421#endif
1422BTF_KFUNCS_END(key_sig_kfunc_set)
1423
1424static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1425 .owner = THIS_MODULE,
1426 .set = &key_sig_kfunc_set,
1427};
1428
1429static int __init bpf_key_sig_kfuncs_init(void)
1430{
1431 return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1432 &bpf_key_sig_kfunc_set);
1433}
1434
1435late_initcall(bpf_key_sig_kfuncs_init);
1436#endif /* CONFIG_KEYS */
1437
1438/* filesystem kfuncs */
1439__bpf_kfunc_start_defs();
1440
1441/**
1442 * bpf_get_file_xattr - get xattr of a file
1443 * @file: file to get xattr from
1444 * @name__str: name of the xattr
1445 * @value_ptr: output buffer of the xattr value
1446 *
1447 * Get xattr *name__str* of *file* and store the output in *value_ptr*.
1448 *
1449 * For security reasons, only *name__str* with prefix "user." is allowed.
1450 *
1451 * Return: 0 on success, a negative value on error.
1452 */
1453__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
1454 struct bpf_dynptr_kern *value_ptr)
1455{
1456 struct dentry *dentry;
1457 u32 value_len;
1458 void *value;
1459 int ret;
1460
1461 if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
1462 return -EPERM;
1463
1464 value_len = __bpf_dynptr_size(value_ptr);
1465 value = __bpf_dynptr_data_rw(value_ptr, value_len);
1466 if (!value)
1467 return -EINVAL;
1468
1469 dentry = file_dentry(file);
1470 ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
1471 if (ret)
1472 return ret;
1473 return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
1474}
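/*
 * Example (a minimal sketch, not part of this file): reading a "user."
 * extended attribute of the file being opened, from a sleepable LSM hook
 * (the filter below rejects other program types).  The xattr name and the
 * __ksym declaration are assumptions matching the kfunc above.
 *
 *	extern int bpf_get_file_xattr(struct file *file, const char *name__str,
 *				      struct bpf_dynptr *value_ptr) __ksym;
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(read_xattr, struct file *file)
 *	{
 *		struct bpf_dynptr value_ptr;
 *		char value[64];
 *		int ret;
 *
 *		bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr);
 *		ret = bpf_get_file_xattr(file, "user.mytag", &value_ptr);
 *		if (ret > 0)
 *			bpf_printk("xattr value is %d bytes", ret);
 *		return 0;
 *	}
 */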
1475
1476__bpf_kfunc_end_defs();
1477
1478BTF_KFUNCS_START(fs_kfunc_set_ids)
1479BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
1480BTF_KFUNCS_END(fs_kfunc_set_ids)
1481
1482static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
1483{
1484 if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
1485 return 0;
1486
1487	/* Only allow attaching from LSM hooks, to avoid recursion */
1488 return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
1489}
1490
1491static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
1492 .owner = THIS_MODULE,
1493 .set = &fs_kfunc_set_ids,
1494 .filter = bpf_get_file_xattr_filter,
1495};
1496
1497static int __init bpf_fs_kfuncs_init(void)
1498{
1499 return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
1500}
1501
1502late_initcall(bpf_fs_kfuncs_init);
1503
1504static const struct bpf_func_proto *
1505bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1506{
1507 switch (func_id) {
1508 case BPF_FUNC_map_lookup_elem:
1509 return &bpf_map_lookup_elem_proto;
1510 case BPF_FUNC_map_update_elem:
1511 return &bpf_map_update_elem_proto;
1512 case BPF_FUNC_map_delete_elem:
1513 return &bpf_map_delete_elem_proto;
1514 case BPF_FUNC_map_push_elem:
1515 return &bpf_map_push_elem_proto;
1516 case BPF_FUNC_map_pop_elem:
1517 return &bpf_map_pop_elem_proto;
1518 case BPF_FUNC_map_peek_elem:
1519 return &bpf_map_peek_elem_proto;
1520 case BPF_FUNC_map_lookup_percpu_elem:
1521 return &bpf_map_lookup_percpu_elem_proto;
1522 case BPF_FUNC_ktime_get_ns:
1523 return &bpf_ktime_get_ns_proto;
1524 case BPF_FUNC_ktime_get_boot_ns:
1525 return &bpf_ktime_get_boot_ns_proto;
1526 case BPF_FUNC_tail_call:
1527 return &bpf_tail_call_proto;
1528 case BPF_FUNC_get_current_pid_tgid:
1529 return &bpf_get_current_pid_tgid_proto;
1530 case BPF_FUNC_get_current_task:
1531 return &bpf_get_current_task_proto;
1532 case BPF_FUNC_get_current_task_btf:
1533 return &bpf_get_current_task_btf_proto;
1534 case BPF_FUNC_task_pt_regs:
1535 return &bpf_task_pt_regs_proto;
1536 case BPF_FUNC_get_current_uid_gid:
1537 return &bpf_get_current_uid_gid_proto;
1538 case BPF_FUNC_get_current_comm:
1539 return &bpf_get_current_comm_proto;
1540 case BPF_FUNC_trace_printk:
1541 return bpf_get_trace_printk_proto();
1542 case BPF_FUNC_get_smp_processor_id:
1543 return &bpf_get_smp_processor_id_proto;
1544 case BPF_FUNC_get_numa_node_id:
1545 return &bpf_get_numa_node_id_proto;
1546 case BPF_FUNC_perf_event_read:
1547 return &bpf_perf_event_read_proto;
1548 case BPF_FUNC_current_task_under_cgroup:
1549 return &bpf_current_task_under_cgroup_proto;
1550 case BPF_FUNC_get_prandom_u32:
1551 return &bpf_get_prandom_u32_proto;
1552 case BPF_FUNC_probe_write_user:
1553 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1554 NULL : bpf_get_probe_write_proto();
1555 case BPF_FUNC_probe_read_user:
1556 return &bpf_probe_read_user_proto;
1557 case BPF_FUNC_probe_read_kernel:
1558 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1559 NULL : &bpf_probe_read_kernel_proto;
1560 case BPF_FUNC_probe_read_user_str:
1561 return &bpf_probe_read_user_str_proto;
1562 case BPF_FUNC_probe_read_kernel_str:
1563 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1564 NULL : &bpf_probe_read_kernel_str_proto;
1565#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1566 case BPF_FUNC_probe_read:
1567 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1568 NULL : &bpf_probe_read_compat_proto;
1569 case BPF_FUNC_probe_read_str:
1570 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1571 NULL : &bpf_probe_read_compat_str_proto;
1572#endif
1573#ifdef CONFIG_CGROUPS
1574 case BPF_FUNC_cgrp_storage_get:
1575 return &bpf_cgrp_storage_get_proto;
1576 case BPF_FUNC_cgrp_storage_delete:
1577 return &bpf_cgrp_storage_delete_proto;
1578#endif
1579 case BPF_FUNC_send_signal:
1580 return &bpf_send_signal_proto;
1581 case BPF_FUNC_send_signal_thread:
1582 return &bpf_send_signal_thread_proto;
1583 case BPF_FUNC_perf_event_read_value:
1584 return &bpf_perf_event_read_value_proto;
1585 case BPF_FUNC_get_ns_current_pid_tgid:
1586 return &bpf_get_ns_current_pid_tgid_proto;
1587 case BPF_FUNC_ringbuf_output:
1588 return &bpf_ringbuf_output_proto;
1589 case BPF_FUNC_ringbuf_reserve:
1590 return &bpf_ringbuf_reserve_proto;
1591 case BPF_FUNC_ringbuf_submit:
1592 return &bpf_ringbuf_submit_proto;
1593 case BPF_FUNC_ringbuf_discard:
1594 return &bpf_ringbuf_discard_proto;
1595 case BPF_FUNC_ringbuf_query:
1596 return &bpf_ringbuf_query_proto;
1597 case BPF_FUNC_jiffies64:
1598 return &bpf_jiffies64_proto;
1599 case BPF_FUNC_get_task_stack:
1600 return &bpf_get_task_stack_proto;
1601 case BPF_FUNC_copy_from_user:
1602 return &bpf_copy_from_user_proto;
1603 case BPF_FUNC_copy_from_user_task:
1604 return &bpf_copy_from_user_task_proto;
1605 case BPF_FUNC_snprintf_btf:
1606 return &bpf_snprintf_btf_proto;
1607 case BPF_FUNC_per_cpu_ptr:
1608 return &bpf_per_cpu_ptr_proto;
1609 case BPF_FUNC_this_cpu_ptr:
1610 return &bpf_this_cpu_ptr_proto;
1611 case BPF_FUNC_task_storage_get:
1612 if (bpf_prog_check_recur(prog))
1613 return &bpf_task_storage_get_recur_proto;
1614 return &bpf_task_storage_get_proto;
1615 case BPF_FUNC_task_storage_delete:
1616 if (bpf_prog_check_recur(prog))
1617 return &bpf_task_storage_delete_recur_proto;
1618 return &bpf_task_storage_delete_proto;
1619 case BPF_FUNC_for_each_map_elem:
1620 return &bpf_for_each_map_elem_proto;
1621 case BPF_FUNC_snprintf:
1622 return &bpf_snprintf_proto;
1623 case BPF_FUNC_get_func_ip:
1624 return &bpf_get_func_ip_proto_tracing;
1625 case BPF_FUNC_get_branch_snapshot:
1626 return &bpf_get_branch_snapshot_proto;
1627 case BPF_FUNC_find_vma:
1628 return &bpf_find_vma_proto;
1629 case BPF_FUNC_trace_vprintk:
1630 return bpf_get_trace_vprintk_proto();
1631 default:
1632 return bpf_base_func_proto(func_id, prog);
1633 }
1634}
1635
1636static const struct bpf_func_proto *
1637kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1638{
1639 switch (func_id) {
1640 case BPF_FUNC_perf_event_output:
1641 return &bpf_perf_event_output_proto;
1642 case BPF_FUNC_get_stackid:
1643 return &bpf_get_stackid_proto;
1644 case BPF_FUNC_get_stack:
1645 return &bpf_get_stack_proto;
1646#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1647 case BPF_FUNC_override_return:
1648 return &bpf_override_return_proto;
1649#endif
1650 case BPF_FUNC_get_func_ip:
1651 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1652 return &bpf_get_func_ip_proto_kprobe_multi;
1653 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1654 return &bpf_get_func_ip_proto_uprobe_multi;
1655 return &bpf_get_func_ip_proto_kprobe;
1656 case BPF_FUNC_get_attach_cookie:
1657 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1658 return &bpf_get_attach_cookie_proto_kmulti;
1659 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1660 return &bpf_get_attach_cookie_proto_umulti;
1661 return &bpf_get_attach_cookie_proto_trace;
1662 default:
1663 return bpf_tracing_func_proto(func_id, prog);
1664 }
1665}
1666
1667/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1668static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1669 const struct bpf_prog *prog,
1670 struct bpf_insn_access_aux *info)
1671{
1672 if (off < 0 || off >= sizeof(struct pt_regs))
1673 return false;
1674 if (type != BPF_READ)
1675 return false;
1676 if (off % size != 0)
1677 return false;
1678 /*
1679 * Assertion for 32 bit to make sure last 8 byte access
1680 * (BPF_DW) to the last 4 byte member is disallowed.
1681 */
1682 if (off + size > sizeof(struct pt_regs))
1683 return false;
1684
1685 return true;
1686}
1687
1688const struct bpf_verifier_ops kprobe_verifier_ops = {
1689 .get_func_proto = kprobe_prog_func_proto,
1690 .is_valid_access = kprobe_prog_is_valid_access,
1691};
1692
1693const struct bpf_prog_ops kprobe_prog_ops = {
1694};
1695
1696BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1697 u64, flags, void *, data, u64, size)
1698{
1699 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1700
1701 /*
1702 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1703 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1704 * from there and call the same bpf_perf_event_output() helper inline.
1705 */
1706 return ____bpf_perf_event_output(regs, map, flags, data, size);
1707}
1708
1709static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1710 .func = bpf_perf_event_output_tp,
1711 .gpl_only = true,
1712 .ret_type = RET_INTEGER,
1713 .arg1_type = ARG_PTR_TO_CTX,
1714 .arg2_type = ARG_CONST_MAP_PTR,
1715 .arg3_type = ARG_ANYTHING,
1716 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1717 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1718};
1719
1720BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1721 u64, flags)
1722{
1723 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1724
1725 /*
1726 * Same comment as in bpf_perf_event_output_tp(), only that this time
1727 * the other helper's function body cannot be inlined due to being
1728 * external, thus we need to call raw helper function.
1729 */
1730 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1731 flags, 0, 0);
1732}
1733
1734static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1735 .func = bpf_get_stackid_tp,
1736 .gpl_only = true,
1737 .ret_type = RET_INTEGER,
1738 .arg1_type = ARG_PTR_TO_CTX,
1739 .arg2_type = ARG_CONST_MAP_PTR,
1740 .arg3_type = ARG_ANYTHING,
1741};
1742
1743BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1744 u64, flags)
1745{
1746 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1747
1748 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1749 (unsigned long) size, flags, 0);
1750}
1751
1752static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1753 .func = bpf_get_stack_tp,
1754 .gpl_only = true,
1755 .ret_type = RET_INTEGER,
1756 .arg1_type = ARG_PTR_TO_CTX,
1757 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1758 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1759 .arg4_type = ARG_ANYTHING,
1760};
1761
1762static const struct bpf_func_proto *
1763tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1764{
1765 switch (func_id) {
1766 case BPF_FUNC_perf_event_output:
1767 return &bpf_perf_event_output_proto_tp;
1768 case BPF_FUNC_get_stackid:
1769 return &bpf_get_stackid_proto_tp;
1770 case BPF_FUNC_get_stack:
1771 return &bpf_get_stack_proto_tp;
1772 case BPF_FUNC_get_attach_cookie:
1773 return &bpf_get_attach_cookie_proto_trace;
1774 default:
1775 return bpf_tracing_func_proto(func_id, prog);
1776 }
1777}
1778
1779static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1780 const struct bpf_prog *prog,
1781 struct bpf_insn_access_aux *info)
1782{
1783 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1784 return false;
1785 if (type != BPF_READ)
1786 return false;
1787 if (off % size != 0)
1788 return false;
1789
1790 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1791 return true;
1792}
1793
1794const struct bpf_verifier_ops tracepoint_verifier_ops = {
1795 .get_func_proto = tp_prog_func_proto,
1796 .is_valid_access = tp_prog_is_valid_access,
1797};
1798
1799const struct bpf_prog_ops tracepoint_prog_ops = {
1800};
1801
1802BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1803 struct bpf_perf_event_value *, buf, u32, size)
1804{
1805 int err = -EINVAL;
1806
1807 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1808 goto clear;
1809 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1810 &buf->running);
1811 if (unlikely(err))
1812 goto clear;
1813 return 0;
1814clear:
1815 memset(buf, 0, size);
1816 return err;
1817}
1818
1819static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1820 .func = bpf_perf_prog_read_value,
1821 .gpl_only = true,
1822 .ret_type = RET_INTEGER,
1823 .arg1_type = ARG_PTR_TO_CTX,
1824 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1825 .arg3_type = ARG_CONST_SIZE,
1826};
1827
1828BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1829 void *, buf, u32, size, u64, flags)
1830{
1831 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1832 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1833 u32 to_copy;
1834
1835 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1836 return -EINVAL;
1837
1838 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1839 return -ENOENT;
1840
1841 if (unlikely(!br_stack))
1842 return -ENOENT;
1843
1844 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1845 return br_stack->nr * br_entry_size;
1846
1847 if (!buf || (size % br_entry_size != 0))
1848 return -EINVAL;
1849
1850 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1851 memcpy(buf, br_stack->entries, to_copy);
1852
1853 return to_copy;
1854}
1855
1856static const struct bpf_func_proto bpf_read_branch_records_proto = {
1857 .func = bpf_read_branch_records,
1858 .gpl_only = true,
1859 .ret_type = RET_INTEGER,
1860 .arg1_type = ARG_PTR_TO_CTX,
1861 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1862 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1863 .arg4_type = ARG_ANYTHING,
1864};
1865
1866static const struct bpf_func_proto *
1867pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1868{
1869 switch (func_id) {
1870 case BPF_FUNC_perf_event_output:
1871 return &bpf_perf_event_output_proto_tp;
1872 case BPF_FUNC_get_stackid:
1873 return &bpf_get_stackid_proto_pe;
1874 case BPF_FUNC_get_stack:
1875 return &bpf_get_stack_proto_pe;
1876 case BPF_FUNC_perf_prog_read_value:
1877 return &bpf_perf_prog_read_value_proto;
1878 case BPF_FUNC_read_branch_records:
1879 return &bpf_read_branch_records_proto;
1880 case BPF_FUNC_get_attach_cookie:
1881 return &bpf_get_attach_cookie_proto_pe;
1882 default:
1883 return bpf_tracing_func_proto(func_id, prog);
1884 }
1885}
1886
1887/*
1888 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1889 * to avoid potential recursive reuse issue when/if tracepoints are added
1890 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1891 *
1892 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1893 * in normal, irq, and nmi context.
1894 */
1895struct bpf_raw_tp_regs {
1896 struct pt_regs regs[3];
1897};
1898static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1899static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1900static struct pt_regs *get_bpf_raw_tp_regs(void)
1901{
1902 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1903 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1904
1905 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1906 this_cpu_dec(bpf_raw_tp_nest_level);
1907 return ERR_PTR(-EBUSY);
1908 }
1909
1910 return &tp_regs->regs[nest_level - 1];
1911}
1912
1913static void put_bpf_raw_tp_regs(void)
1914{
1915 this_cpu_dec(bpf_raw_tp_nest_level);
1916}
1917
1918BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1919 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1920{
1921 struct pt_regs *regs = get_bpf_raw_tp_regs();
1922 int ret;
1923
1924 if (IS_ERR(regs))
1925 return PTR_ERR(regs);
1926
1927 perf_fetch_caller_regs(regs);
1928 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1929
1930 put_bpf_raw_tp_regs();
1931 return ret;
1932}
1933
1934static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1935 .func = bpf_perf_event_output_raw_tp,
1936 .gpl_only = true,
1937 .ret_type = RET_INTEGER,
1938 .arg1_type = ARG_PTR_TO_CTX,
1939 .arg2_type = ARG_CONST_MAP_PTR,
1940 .arg3_type = ARG_ANYTHING,
1941 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1942 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1943};
1944
1945extern const struct bpf_func_proto bpf_skb_output_proto;
1946extern const struct bpf_func_proto bpf_xdp_output_proto;
1947extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1948
1949BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1950 struct bpf_map *, map, u64, flags)
1951{
1952 struct pt_regs *regs = get_bpf_raw_tp_regs();
1953 int ret;
1954
1955 if (IS_ERR(regs))
1956 return PTR_ERR(regs);
1957
1958 perf_fetch_caller_regs(regs);
1959 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1960 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1961 flags, 0, 0);
1962 put_bpf_raw_tp_regs();
1963 return ret;
1964}
1965
1966static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1967 .func = bpf_get_stackid_raw_tp,
1968 .gpl_only = true,
1969 .ret_type = RET_INTEGER,
1970 .arg1_type = ARG_PTR_TO_CTX,
1971 .arg2_type = ARG_CONST_MAP_PTR,
1972 .arg3_type = ARG_ANYTHING,
1973};
1974
1975BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1976 void *, buf, u32, size, u64, flags)
1977{
1978 struct pt_regs *regs = get_bpf_raw_tp_regs();
1979 int ret;
1980
1981 if (IS_ERR(regs))
1982 return PTR_ERR(regs);
1983
1984 perf_fetch_caller_regs(regs);
1985 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1986 (unsigned long) size, flags, 0);
1987 put_bpf_raw_tp_regs();
1988 return ret;
1989}
1990
1991static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1992 .func = bpf_get_stack_raw_tp,
1993 .gpl_only = true,
1994 .ret_type = RET_INTEGER,
1995 .arg1_type = ARG_PTR_TO_CTX,
1996 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1997 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1998 .arg4_type = ARG_ANYTHING,
1999};
2000
2001static const struct bpf_func_proto *
2002raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2003{
2004 switch (func_id) {
2005 case BPF_FUNC_perf_event_output:
2006 return &bpf_perf_event_output_proto_raw_tp;
2007 case BPF_FUNC_get_stackid:
2008 return &bpf_get_stackid_proto_raw_tp;
2009 case BPF_FUNC_get_stack:
2010 return &bpf_get_stack_proto_raw_tp;
2011 default:
2012 return bpf_tracing_func_proto(func_id, prog);
2013 }
2014}
2015
2016const struct bpf_func_proto *
2017tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2018{
2019 const struct bpf_func_proto *fn;
2020
2021 switch (func_id) {
2022#ifdef CONFIG_NET
2023 case BPF_FUNC_skb_output:
2024 return &bpf_skb_output_proto;
2025 case BPF_FUNC_xdp_output:
2026 return &bpf_xdp_output_proto;
2027 case BPF_FUNC_skc_to_tcp6_sock:
2028 return &bpf_skc_to_tcp6_sock_proto;
2029 case BPF_FUNC_skc_to_tcp_sock:
2030 return &bpf_skc_to_tcp_sock_proto;
2031 case BPF_FUNC_skc_to_tcp_timewait_sock:
2032 return &bpf_skc_to_tcp_timewait_sock_proto;
2033 case BPF_FUNC_skc_to_tcp_request_sock:
2034 return &bpf_skc_to_tcp_request_sock_proto;
2035 case BPF_FUNC_skc_to_udp6_sock:
2036 return &bpf_skc_to_udp6_sock_proto;
2037 case BPF_FUNC_skc_to_unix_sock:
2038 return &bpf_skc_to_unix_sock_proto;
2039 case BPF_FUNC_skc_to_mptcp_sock:
2040 return &bpf_skc_to_mptcp_sock_proto;
2041 case BPF_FUNC_sk_storage_get:
2042 return &bpf_sk_storage_get_tracing_proto;
2043 case BPF_FUNC_sk_storage_delete:
2044 return &bpf_sk_storage_delete_tracing_proto;
2045 case BPF_FUNC_sock_from_file:
2046 return &bpf_sock_from_file_proto;
2047 case BPF_FUNC_get_socket_cookie:
2048 return &bpf_get_socket_ptr_cookie_proto;
2049 case BPF_FUNC_xdp_get_buff_len:
2050 return &bpf_xdp_get_buff_len_trace_proto;
2051#endif
2052 case BPF_FUNC_seq_printf:
2053 return prog->expected_attach_type == BPF_TRACE_ITER ?
2054 &bpf_seq_printf_proto :
2055 NULL;
2056 case BPF_FUNC_seq_write:
2057 return prog->expected_attach_type == BPF_TRACE_ITER ?
2058 &bpf_seq_write_proto :
2059 NULL;
2060 case BPF_FUNC_seq_printf_btf:
2061 return prog->expected_attach_type == BPF_TRACE_ITER ?
2062 &bpf_seq_printf_btf_proto :
2063 NULL;
2064 case BPF_FUNC_d_path:
2065 return &bpf_d_path_proto;
2066 case BPF_FUNC_get_func_arg:
2067 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
2068 case BPF_FUNC_get_func_ret:
2069 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2070 case BPF_FUNC_get_func_arg_cnt:
2071 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2072 case BPF_FUNC_get_attach_cookie:
2073 return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2074 default:
2075 fn = raw_tp_prog_func_proto(func_id, prog);
2076 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2077 fn = bpf_iter_get_func_proto(func_id, prog);
2078 return fn;
2079 }
2080}
2081
2082static bool raw_tp_prog_is_valid_access(int off, int size,
2083 enum bpf_access_type type,
2084 const struct bpf_prog *prog,
2085 struct bpf_insn_access_aux *info)
2086{
2087 return bpf_tracing_ctx_access(off, size, type);
2088}
2089
2090static bool tracing_prog_is_valid_access(int off, int size,
2091 enum bpf_access_type type,
2092 const struct bpf_prog *prog,
2093 struct bpf_insn_access_aux *info)
2094{
2095 return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2096}
2097
2098int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2099 const union bpf_attr *kattr,
2100 union bpf_attr __user *uattr)
2101{
2102 return -ENOTSUPP;
2103}
2104
2105const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2106 .get_func_proto = raw_tp_prog_func_proto,
2107 .is_valid_access = raw_tp_prog_is_valid_access,
2108};
2109
2110const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2111#ifdef CONFIG_NET
2112 .test_run = bpf_prog_test_run_raw_tp,
2113#endif
2114};
2115
2116const struct bpf_verifier_ops tracing_verifier_ops = {
2117 .get_func_proto = tracing_prog_func_proto,
2118 .is_valid_access = tracing_prog_is_valid_access,
2119};
2120
2121const struct bpf_prog_ops tracing_prog_ops = {
2122 .test_run = bpf_prog_test_run_tracing,
2123};
2124
2125static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2126 enum bpf_access_type type,
2127 const struct bpf_prog *prog,
2128 struct bpf_insn_access_aux *info)
2129{
2130 if (off == 0) {
2131 if (size != sizeof(u64) || type != BPF_READ)
2132 return false;
2133 info->reg_type = PTR_TO_TP_BUFFER;
2134 }
2135 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2136}
2137
2138const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2139 .get_func_proto = raw_tp_prog_func_proto,
2140 .is_valid_access = raw_tp_writable_prog_is_valid_access,
2141};
2142
2143const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2144};
2145
2146static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2147 const struct bpf_prog *prog,
2148 struct bpf_insn_access_aux *info)
2149{
2150 const int size_u64 = sizeof(u64);
2151
2152 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2153 return false;
2154 if (type != BPF_READ)
2155 return false;
2156 if (off % size != 0) {
2157 if (sizeof(unsigned long) != 4)
2158 return false;
2159 if (size != 8)
2160 return false;
2161 if (off % size != 4)
2162 return false;
2163 }
2164
2165 switch (off) {
2166 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2167 bpf_ctx_record_field_size(info, size_u64);
2168 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2169 return false;
2170 break;
2171 case bpf_ctx_range(struct bpf_perf_event_data, addr):
2172 bpf_ctx_record_field_size(info, size_u64);
2173 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2174 return false;
2175 break;
2176 default:
2177 if (size != sizeof(long))
2178 return false;
2179 }
2180
2181 return true;
2182}
2183
2184static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2185 const struct bpf_insn *si,
2186 struct bpf_insn *insn_buf,
2187 struct bpf_prog *prog, u32 *target_size)
2188{
2189 struct bpf_insn *insn = insn_buf;
2190
2191 switch (si->off) {
2192 case offsetof(struct bpf_perf_event_data, sample_period):
2193 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2194 data), si->dst_reg, si->src_reg,
2195 offsetof(struct bpf_perf_event_data_kern, data));
2196 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2197 bpf_target_off(struct perf_sample_data, period, 8,
2198 target_size));
2199 break;
2200 case offsetof(struct bpf_perf_event_data, addr):
2201 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2202 data), si->dst_reg, si->src_reg,
2203 offsetof(struct bpf_perf_event_data_kern, data));
2204 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2205 bpf_target_off(struct perf_sample_data, addr, 8,
2206 target_size));
2207 break;
2208 default:
2209 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2210 regs), si->dst_reg, si->src_reg,
2211 offsetof(struct bpf_perf_event_data_kern, regs));
2212 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2213 si->off);
2214 break;
2215 }
2216
2217 return insn - insn_buf;
2218}
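
/*
 * For reference, a minimal sketch of what the rewrite above achieves,
 * assuming a perf_event program that reads ctx->sample_period (SEC()
 * and the program name are illustrative, not part of this file):
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period;
 *
 *		return 0;
 *	}
 *
 * That single context load is converted into roughly the equivalent of
 *
 *	period = ((struct bpf_perf_event_data_kern *)ctx)->data->period;
 *
 * i.e. one load of the kernel-side 'data' pointer followed by a BPF_DW
 * load of perf_sample_data::period.
 */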
2219
2220const struct bpf_verifier_ops perf_event_verifier_ops = {
2221 .get_func_proto = pe_prog_func_proto,
2222 .is_valid_access = pe_prog_is_valid_access,
2223 .convert_ctx_access = pe_prog_convert_ctx_access,
2224};
2225
2226const struct bpf_prog_ops perf_event_prog_ops = {
2227};
2228
2229static DEFINE_MUTEX(bpf_event_mutex);
2230
2231#define BPF_TRACE_MAX_PROGS 64
2232
2233int perf_event_attach_bpf_prog(struct perf_event *event,
2234 struct bpf_prog *prog,
2235 u64 bpf_cookie)
2236{
2237 struct bpf_prog_array *old_array;
2238 struct bpf_prog_array *new_array;
2239 int ret = -EEXIST;
2240
2241 /*
2242	 * Kprobe override only works if the probe is on the function entry,
2243	 * and only if the target function is on the error-injection opt-in list.
2244 */
2245 if (prog->kprobe_override &&
2246 (!trace_kprobe_on_func_entry(event->tp_event) ||
2247 !trace_kprobe_error_injectable(event->tp_event)))
2248 return -EINVAL;
2249
2250 mutex_lock(&bpf_event_mutex);
2251
2252 if (event->prog)
2253 goto unlock;
2254
2255 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2256 if (old_array &&
2257 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2258 ret = -E2BIG;
2259 goto unlock;
2260 }
2261
2262 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2263 if (ret < 0)
2264 goto unlock;
2265
2266	/* set event->tp_event->prog_array to the new array and set event->prog */
2267 event->prog = prog;
2268 event->bpf_cookie = bpf_cookie;
2269 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2270 bpf_prog_array_free_sleepable(old_array);
2271
2272unlock:
2273 mutex_unlock(&bpf_event_mutex);
2274 return ret;
2275}
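
/*
 * A minimal user-space sketch of one path that reaches the attach
 * above, via the PERF_EVENT_IOC_SET_BPF ioctl on a kprobe/tracepoint
 * perf event (the fd names are illustrative):
 *
 *	if (ioctl(perf_event_fd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd))
 *		perror("PERF_EVENT_IOC_SET_BPF");
 *
 * The perf-event BPF link path ends up here as well.
 */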
2276
2277void perf_event_detach_bpf_prog(struct perf_event *event)
2278{
2279 struct bpf_prog_array *old_array;
2280 struct bpf_prog_array *new_array;
2281 int ret;
2282
2283 mutex_lock(&bpf_event_mutex);
2284
2285 if (!event->prog)
2286 goto unlock;
2287
2288 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2289 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2290 if (ret == -ENOENT)
2291 goto unlock;
2292 if (ret < 0) {
2293 bpf_prog_array_delete_safe(old_array, event->prog);
2294 } else {
2295 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2296 bpf_prog_array_free_sleepable(old_array);
2297 }
2298
2299 bpf_prog_put(event->prog);
2300 event->prog = NULL;
2301
2302unlock:
2303 mutex_unlock(&bpf_event_mutex);
2304}
2305
2306int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2307{
2308 struct perf_event_query_bpf __user *uquery = info;
2309 struct perf_event_query_bpf query = {};
2310 struct bpf_prog_array *progs;
2311 u32 *ids, prog_cnt, ids_len;
2312 int ret;
2313
2314 if (!perfmon_capable())
2315 return -EPERM;
2316 if (event->attr.type != PERF_TYPE_TRACEPOINT)
2317 return -EINVAL;
2318 if (copy_from_user(&query, uquery, sizeof(query)))
2319 return -EFAULT;
2320
2321 ids_len = query.ids_len;
2322 if (ids_len > BPF_TRACE_MAX_PROGS)
2323 return -E2BIG;
2324 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2325 if (!ids)
2326 return -ENOMEM;
2327 /*
2328	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2329	 * is required when the user only wants to check uquery->prog_cnt.
2330	 * There is no need to check for it since that case is handled
2331	 * gracefully in bpf_prog_array_copy_info.
2332 */
2333
2334 mutex_lock(&bpf_event_mutex);
2335 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2336 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2337 mutex_unlock(&bpf_event_mutex);
2338
2339 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2340 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2341 ret = -EFAULT;
2342
2343 kfree(ids);
2344 return ret;
2345}
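
/*
 * A minimal user-space sketch of the query above, driven through the
 * PERF_EVENT_IOC_QUERY_BPF ioctl (the buffer size is illustrative):
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 16 * sizeof(__u32));
 *	query->ids_len = 16;
 *	if (!ioctl(perf_event_fd, PERF_EVENT_IOC_QUERY_BPF, query))
 *		printf("%u program(s) attached\n", query->prog_cnt);
 */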
2346
2347extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2348extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2349
2350struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2351{
2352 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2353
2354 for (; btp < __stop__bpf_raw_tp; btp++) {
2355 if (!strcmp(btp->tp->name, name))
2356 return btp;
2357 }
2358
2359 return bpf_get_raw_tracepoint_module(name);
2360}
2361
2362void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2363{
2364 struct module *mod;
2365
2366 preempt_disable();
2367 mod = __module_address((unsigned long)btp);
2368 module_put(mod);
2369 preempt_enable();
2370}
2371
2372static __always_inline
2373void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2374{
2375 cant_sleep();
2376 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2377 bpf_prog_inc_misses_counter(prog);
2378 goto out;
2379 }
2380 rcu_read_lock();
2381 (void) bpf_prog_run(prog, args);
2382 rcu_read_unlock();
2383out:
2384 this_cpu_dec(*(prog->active));
2385}
2386
2387#define UNPACK(...) __VA_ARGS__
2388#define REPEAT_1(FN, DL, X, ...) FN(X)
2389#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2390#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2391#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2392#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2393#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2394#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2395#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2396#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2397#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2398#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2399#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2400#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
2401
2402#define SARG(X) u64 arg##X
2403#define COPY(X) args[X] = arg##X
2404
2405#define __DL_COM (,)
2406#define __DL_SEM (;)
2407
2408#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2409
2410#define BPF_TRACE_DEFN_x(x) \
2411 void bpf_trace_run##x(struct bpf_prog *prog, \
2412 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
2413 { \
2414 u64 args[x]; \
2415 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
2416 __bpf_trace_run(prog, args); \
2417 } \
2418 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2419BPF_TRACE_DEFN_x(1);
2420BPF_TRACE_DEFN_x(2);
2421BPF_TRACE_DEFN_x(3);
2422BPF_TRACE_DEFN_x(4);
2423BPF_TRACE_DEFN_x(5);
2424BPF_TRACE_DEFN_x(6);
2425BPF_TRACE_DEFN_x(7);
2426BPF_TRACE_DEFN_x(8);
2427BPF_TRACE_DEFN_x(9);
2428BPF_TRACE_DEFN_x(10);
2429BPF_TRACE_DEFN_x(11);
2430BPF_TRACE_DEFN_x(12);
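
/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands to roughly:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * so every raw tracepoint with N arguments has a bpf_trace_runN()
 * thunk that packs its arguments into a u64 array, which the attached
 * program then sees as its context.
 */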
2431
2432static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2433{
2434 struct tracepoint *tp = btp->tp;
2435
2436 /*
2437	 * check that the program doesn't access arguments beyond what's
2438	 * available in this tracepoint
2439 */
2440 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2441 return -EINVAL;
2442
2443 if (prog->aux->max_tp_access > btp->writable_size)
2444 return -EINVAL;
2445
2446 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2447 prog);
2448}
2449
2450int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2451{
2452 return __bpf_probe_register(btp, prog);
2453}
2454
2455int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2456{
2457 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2458}
2459
2460int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2461 u32 *fd_type, const char **buf,
2462 u64 *probe_offset, u64 *probe_addr,
2463 unsigned long *missed)
2464{
2465 bool is_tracepoint, is_syscall_tp;
2466 struct bpf_prog *prog;
2467 int flags, err = 0;
2468
2469 prog = event->prog;
2470 if (!prog)
2471 return -ENOENT;
2472
2473 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2474 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2475 return -EOPNOTSUPP;
2476
2477 *prog_id = prog->aux->id;
2478 flags = event->tp_event->flags;
2479 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2480 is_syscall_tp = is_syscall_trace_event(event->tp_event);
2481
2482 if (is_tracepoint || is_syscall_tp) {
2483 *buf = is_tracepoint ? event->tp_event->tp->name
2484 : event->tp_event->name;
2485 /* We allow NULL pointer for tracepoint */
2486 if (fd_type)
2487 *fd_type = BPF_FD_TYPE_TRACEPOINT;
2488 if (probe_offset)
2489 *probe_offset = 0x0;
2490 if (probe_addr)
2491 *probe_addr = 0x0;
2492 } else {
2493 /* kprobe/uprobe */
2494 err = -EOPNOTSUPP;
2495#ifdef CONFIG_KPROBE_EVENTS
2496 if (flags & TRACE_EVENT_FL_KPROBE)
2497 err = bpf_get_kprobe_info(event, fd_type, buf,
2498 probe_offset, probe_addr, missed,
2499 event->attr.type == PERF_TYPE_TRACEPOINT);
2500#endif
2501#ifdef CONFIG_UPROBE_EVENTS
2502 if (flags & TRACE_EVENT_FL_UPROBE)
2503 err = bpf_get_uprobe_info(event, fd_type, buf,
2504 probe_offset, probe_addr,
2505 event->attr.type == PERF_TYPE_TRACEPOINT);
2506#endif
2507 }
2508
2509 return err;
2510}
2511
2512static int __init send_signal_irq_work_init(void)
2513{
2514 int cpu;
2515 struct send_signal_irq_work *work;
2516
2517 for_each_possible_cpu(cpu) {
2518 work = per_cpu_ptr(&send_signal_work, cpu);
2519 init_irq_work(&work->irq_work, do_bpf_send_signal);
2520 }
2521 return 0;
2522}
2523
2524subsys_initcall(send_signal_irq_work_init);
2525
2526#ifdef CONFIG_MODULES
2527static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2528 void *module)
2529{
2530 struct bpf_trace_module *btm, *tmp;
2531 struct module *mod = module;
2532 int ret = 0;
2533
2534 if (mod->num_bpf_raw_events == 0 ||
2535 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2536 goto out;
2537
2538 mutex_lock(&bpf_module_mutex);
2539
2540 switch (op) {
2541 case MODULE_STATE_COMING:
2542 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2543 if (btm) {
2544 btm->module = module;
2545 list_add(&btm->list, &bpf_trace_modules);
2546 } else {
2547 ret = -ENOMEM;
2548 }
2549 break;
2550 case MODULE_STATE_GOING:
2551 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2552 if (btm->module == module) {
2553 list_del(&btm->list);
2554 kfree(btm);
2555 break;
2556 }
2557 }
2558 break;
2559 }
2560
2561 mutex_unlock(&bpf_module_mutex);
2562
2563out:
2564 return notifier_from_errno(ret);
2565}
2566
2567static struct notifier_block bpf_module_nb = {
2568 .notifier_call = bpf_event_notify,
2569};
2570
2571static int __init bpf_event_init(void)
2572{
2573 register_module_notifier(&bpf_module_nb);
2574 return 0;
2575}
2576
2577fs_initcall(bpf_event_init);
2578#endif /* CONFIG_MODULES */
2579
2580#ifdef CONFIG_FPROBE
2581struct bpf_kprobe_multi_link {
2582 struct bpf_link link;
2583 struct fprobe fp;
2584 unsigned long *addrs;
2585 u64 *cookies;
2586 u32 cnt;
2587 u32 mods_cnt;
2588 struct module **mods;
2589 u32 flags;
2590};
2591
2592struct bpf_kprobe_multi_run_ctx {
2593 struct bpf_run_ctx run_ctx;
2594 struct bpf_kprobe_multi_link *link;
2595 unsigned long entry_ip;
2596};
2597
2598struct user_syms {
2599 const char **syms;
2600 char *buf;
2601};
2602
2603static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2604{
2605 unsigned long __user usymbol;
2606 const char **syms = NULL;
2607 char *buf = NULL, *p;
2608 int err = -ENOMEM;
2609 unsigned int i;
2610
2611 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2612 if (!syms)
2613 goto error;
2614
2615 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2616 if (!buf)
2617 goto error;
2618
2619 for (p = buf, i = 0; i < cnt; i++) {
2620 if (__get_user(usymbol, usyms + i)) {
2621 err = -EFAULT;
2622 goto error;
2623 }
2624 err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2625 if (err == KSYM_NAME_LEN)
2626 err = -E2BIG;
2627 if (err < 0)
2628 goto error;
2629 syms[i] = p;
2630 p += err + 1;
2631 }
2632
2633 us->syms = syms;
2634 us->buf = buf;
2635 return 0;
2636
2637error:
2638 if (err) {
2639 kvfree(syms);
2640 kvfree(buf);
2641 }
2642 return err;
2643}
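
/*
 * Resulting layout, illustrated for three copied symbols (the names
 * are examples only):
 *
 *	buf:  "vfs_read\0vfs_write\0ksys_read\0"
 *	syms: { &buf[0], &buf[9], &buf[19] }
 *
 * buf is sized cnt * KSYM_NAME_LEN, but the names are packed back to
 * back, with syms[] pointing at the start of each NUL-terminated name.
 */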
2644
2645static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2646{
2647 u32 i;
2648
2649 for (i = 0; i < cnt; i++)
2650 module_put(mods[i]);
2651}
2652
2653static void free_user_syms(struct user_syms *us)
2654{
2655 kvfree(us->syms);
2656 kvfree(us->buf);
2657}
2658
2659static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2660{
2661 struct bpf_kprobe_multi_link *kmulti_link;
2662
2663 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2664 unregister_fprobe(&kmulti_link->fp);
2665 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2666}
2667
2668static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2669{
2670 struct bpf_kprobe_multi_link *kmulti_link;
2671
2672 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2673 kvfree(kmulti_link->addrs);
2674 kvfree(kmulti_link->cookies);
2675 kfree(kmulti_link->mods);
2676 kfree(kmulti_link);
2677}
2678
2679static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2680 struct bpf_link_info *info)
2681{
2682 u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2683 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2684 struct bpf_kprobe_multi_link *kmulti_link;
2685 u32 ucount = info->kprobe_multi.count;
2686 int err = 0, i;
2687
2688 if (!uaddrs ^ !ucount)
2689 return -EINVAL;
2690 if (ucookies && !ucount)
2691 return -EINVAL;
2692
2693 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2694 info->kprobe_multi.count = kmulti_link->cnt;
2695 info->kprobe_multi.flags = kmulti_link->flags;
2696 info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2697
2698 if (!uaddrs)
2699 return 0;
2700 if (ucount < kmulti_link->cnt)
2701 err = -ENOSPC;
2702 else
2703 ucount = kmulti_link->cnt;
2704
2705 if (ucookies) {
2706 if (kmulti_link->cookies) {
2707 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2708 return -EFAULT;
2709 } else {
2710 for (i = 0; i < ucount; i++) {
2711 if (put_user(0, ucookies + i))
2712 return -EFAULT;
2713 }
2714 }
2715 }
2716
2717 if (kallsyms_show_value(current_cred())) {
2718 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2719 return -EFAULT;
2720 } else {
2721 for (i = 0; i < ucount; i++) {
2722 if (put_user(0, uaddrs + i))
2723 return -EFAULT;
2724 }
2725 }
2726 return err;
2727}
2728
2729static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2730 .release = bpf_kprobe_multi_link_release,
2731 .dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2732 .fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2733};
2734
2735static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2736{
2737 const struct bpf_kprobe_multi_link *link = priv;
2738 unsigned long *addr_a = a, *addr_b = b;
2739 u64 *cookie_a, *cookie_b;
2740
2741 cookie_a = link->cookies + (addr_a - link->addrs);
2742 cookie_b = link->cookies + (addr_b - link->addrs);
2743
2744 /* swap addr_a/addr_b and cookie_a/cookie_b values */
2745 swap(*addr_a, *addr_b);
2746 swap(*cookie_a, *cookie_b);
2747}
2748
2749static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2750{
2751 const unsigned long *addr_a = a, *addr_b = b;
2752
2753 if (*addr_a == *addr_b)
2754 return 0;
2755 return *addr_a < *addr_b ? -1 : 1;
2756}
2757
2758static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2759{
2760 return bpf_kprobe_multi_addrs_cmp(a, b);
2761}
2762
2763static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2764{
2765 struct bpf_kprobe_multi_run_ctx *run_ctx;
2766 struct bpf_kprobe_multi_link *link;
2767 u64 *cookie, entry_ip;
2768 unsigned long *addr;
2769
2770 if (WARN_ON_ONCE(!ctx))
2771 return 0;
2772 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2773 link = run_ctx->link;
2774 if (!link->cookies)
2775 return 0;
2776 entry_ip = run_ctx->entry_ip;
2777 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2778 bpf_kprobe_multi_addrs_cmp);
2779 if (!addr)
2780 return 0;
2781 cookie = link->cookies + (addr - link->addrs);
2782 return *cookie;
2783}
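
/*
 * A minimal BPF-side sketch of how the cookie resolved above is
 * consumed, assuming a kprobe.multi program attached with per-symbol
 * cookies (section name and program name are illustrative):
 *
 *	SEC("kprobe.multi")
 *	int BPF_KPROBE(on_entry)
 *	{
 *		bpf_printk("cookie %llu", bpf_get_attach_cookie(ctx));
 *		return 0;
 *	}
 *
 * link->addrs and link->cookies are parallel arrays kept sorted
 * together (see bpf_kprobe_multi_cookie_swap()), so the bsearch() on
 * the entry IP directly yields the index of the matching cookie.
 */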
2784
2785static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2786{
2787 struct bpf_kprobe_multi_run_ctx *run_ctx;
2788
2789 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2790 return run_ctx->entry_ip;
2791}
2792
2793static int
2794kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2795 unsigned long entry_ip, struct pt_regs *regs)
2796{
2797 struct bpf_kprobe_multi_run_ctx run_ctx = {
2798 .link = link,
2799 .entry_ip = entry_ip,
2800 };
2801 struct bpf_run_ctx *old_run_ctx;
2802 int err;
2803
2804 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2805 bpf_prog_inc_misses_counter(link->link.prog);
2806 err = 0;
2807 goto out;
2808 }
2809
2810 migrate_disable();
2811 rcu_read_lock();
2812 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2813 err = bpf_prog_run(link->link.prog, regs);
2814 bpf_reset_run_ctx(old_run_ctx);
2815 rcu_read_unlock();
2816 migrate_enable();
2817
2818 out:
2819 __this_cpu_dec(bpf_prog_active);
2820 return err;
2821}
2822
2823static int
2824kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2825 unsigned long ret_ip, struct pt_regs *regs,
2826 void *data)
2827{
2828 struct bpf_kprobe_multi_link *link;
2829
2830 link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2831 kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2832 return 0;
2833}
2834
2835static void
2836kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2837 unsigned long ret_ip, struct pt_regs *regs,
2838 void *data)
2839{
2840 struct bpf_kprobe_multi_link *link;
2841
2842 link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2843 kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2844}
2845
2846static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2847{
2848 const char **str_a = (const char **) a;
2849 const char **str_b = (const char **) b;
2850
2851 return strcmp(*str_a, *str_b);
2852}
2853
2854struct multi_symbols_sort {
2855 const char **funcs;
2856 u64 *cookies;
2857};
2858
2859static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2860{
2861 const struct multi_symbols_sort *data = priv;
2862 const char **name_a = a, **name_b = b;
2863
2864 swap(*name_a, *name_b);
2865
2866	/* If defined, also swap the related cookies. */
2867 if (data->cookies) {
2868 u64 *cookie_a, *cookie_b;
2869
2870 cookie_a = data->cookies + (name_a - data->funcs);
2871 cookie_b = data->cookies + (name_b - data->funcs);
2872 swap(*cookie_a, *cookie_b);
2873 }
2874}
2875
2876struct modules_array {
2877 struct module **mods;
2878 int mods_cnt;
2879 int mods_cap;
2880};
2881
2882static int add_module(struct modules_array *arr, struct module *mod)
2883{
2884 struct module **mods;
2885
2886 if (arr->mods_cnt == arr->mods_cap) {
2887 arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2888 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2889 if (!mods)
2890 return -ENOMEM;
2891 arr->mods = mods;
2892 }
2893
2894 arr->mods[arr->mods_cnt] = mod;
2895 arr->mods_cnt++;
2896 return 0;
2897}
2898
2899static bool has_module(struct modules_array *arr, struct module *mod)
2900{
2901 int i;
2902
2903 for (i = arr->mods_cnt - 1; i >= 0; i--) {
2904 if (arr->mods[i] == mod)
2905 return true;
2906 }
2907 return false;
2908}
2909
2910static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2911{
2912 struct modules_array arr = {};
2913 u32 i, err = 0;
2914
2915 for (i = 0; i < addrs_cnt; i++) {
2916 struct module *mod;
2917
2918 preempt_disable();
2919 mod = __module_address(addrs[i]);
2920		/* Either no module or it's already stored */
2921 if (!mod || has_module(&arr, mod)) {
2922 preempt_enable();
2923 continue;
2924 }
2925 if (!try_module_get(mod))
2926 err = -EINVAL;
2927 preempt_enable();
2928 if (err)
2929 break;
2930 err = add_module(&arr, mod);
2931 if (err) {
2932 module_put(mod);
2933 break;
2934 }
2935 }
2936
2937 /* We return either err < 0 in case of error, ... */
2938 if (err) {
2939 kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2940 kfree(arr.mods);
2941 return err;
2942 }
2943
2944	/* ... or the number of modules found if everything is ok. */
2945 *mods = arr.mods;
2946 return arr.mods_cnt;
2947}
2948
2949static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2950{
2951 u32 i;
2952
2953 for (i = 0; i < cnt; i++) {
2954 if (!within_error_injection_list(addrs[i]))
2955 return -EINVAL;
2956 }
2957 return 0;
2958}
2959
2960int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2961{
2962 struct bpf_kprobe_multi_link *link = NULL;
2963 struct bpf_link_primer link_primer;
2964 void __user *ucookies;
2965 unsigned long *addrs;
2966 u32 flags, cnt, size;
2967 void __user *uaddrs;
2968 u64 *cookies = NULL;
2969 void __user *usyms;
2970 int err;
2971
2972 /* no support for 32bit archs yet */
2973 if (sizeof(u64) != sizeof(void *))
2974 return -EOPNOTSUPP;
2975
2976 if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
2977 return -EINVAL;
2978
2979 flags = attr->link_create.kprobe_multi.flags;
2980 if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2981 return -EINVAL;
2982
2983 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2984 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2985 if (!!uaddrs == !!usyms)
2986 return -EINVAL;
2987
2988 cnt = attr->link_create.kprobe_multi.cnt;
2989 if (!cnt)
2990 return -EINVAL;
2991 if (cnt > MAX_KPROBE_MULTI_CNT)
2992 return -E2BIG;
2993
2994 size = cnt * sizeof(*addrs);
2995 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2996 if (!addrs)
2997 return -ENOMEM;
2998
2999 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
3000 if (ucookies) {
3001 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
3002 if (!cookies) {
3003 err = -ENOMEM;
3004 goto error;
3005 }
3006 if (copy_from_user(cookies, ucookies, size)) {
3007 err = -EFAULT;
3008 goto error;
3009 }
3010 }
3011
3012 if (uaddrs) {
3013 if (copy_from_user(addrs, uaddrs, size)) {
3014 err = -EFAULT;
3015 goto error;
3016 }
3017 } else {
3018 struct multi_symbols_sort data = {
3019 .cookies = cookies,
3020 };
3021 struct user_syms us;
3022
3023 err = copy_user_syms(&us, usyms, cnt);
3024 if (err)
3025 goto error;
3026
3027 if (cookies)
3028 data.funcs = us.syms;
3029
3030 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
3031 symbols_swap_r, &data);
3032
3033 err = ftrace_lookup_symbols(us.syms, cnt, addrs);
3034 free_user_syms(&us);
3035 if (err)
3036 goto error;
3037 }
3038
3039 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
3040 err = -EINVAL;
3041 goto error;
3042 }
3043
3044 link = kzalloc(sizeof(*link), GFP_KERNEL);
3045 if (!link) {
3046 err = -ENOMEM;
3047 goto error;
3048 }
3049
3050 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
3051 &bpf_kprobe_multi_link_lops, prog);
3052
3053 err = bpf_link_prime(&link->link, &link_primer);
3054 if (err)
3055 goto error;
3056
3057 if (flags & BPF_F_KPROBE_MULTI_RETURN)
3058 link->fp.exit_handler = kprobe_multi_link_exit_handler;
3059 else
3060 link->fp.entry_handler = kprobe_multi_link_handler;
3061
3062 link->addrs = addrs;
3063 link->cookies = cookies;
3064 link->cnt = cnt;
3065 link->flags = flags;
3066
3067 if (cookies) {
3068 /*
3069	 * Sorting the addresses will sort the cookies the same way
3070	 * (see bpf_kprobe_multi_cookie_swap). This way we can find
3071	 * the cookie for a given address in the bpf_get_attach_cookie
3072	 * helper.
3073 */
3074 sort_r(addrs, cnt, sizeof(*addrs),
3075 bpf_kprobe_multi_cookie_cmp,
3076 bpf_kprobe_multi_cookie_swap,
3077 link);
3078 }
3079
3080 err = get_modules_for_addrs(&link->mods, addrs, cnt);
3081 if (err < 0) {
3082 bpf_link_cleanup(&link_primer);
3083 return err;
3084 }
3085 link->mods_cnt = err;
3086
3087 err = register_fprobe_ips(&link->fp, addrs, cnt);
3088 if (err) {
3089 kprobe_multi_put_modules(link->mods, link->mods_cnt);
3090 bpf_link_cleanup(&link_primer);
3091 return err;
3092 }
3093
3094 return bpf_link_settle(&link_primer);
3095
3096error:
3097 kfree(link);
3098 kvfree(addrs);
3099 kvfree(cookies);
3100 return err;
3101}
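
/*
 * A minimal user-space sketch of driving the attach above through
 * libbpf, assuming bpf_program__attach_kprobe_multi_opts() (the
 * skeleton and pattern names are illustrative):
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(skel->progs.on_entry,
 *						     "tcp_*", &opts);
 *	if (!link)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 *
 * libbpf resolves the pattern to symbols and fills the
 * link_create.kprobe_multi attributes (syms/addrs, cnt, cookies,
 * flags) consumed by this function.
 */
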
3102#else /* !CONFIG_FPROBE */
3103int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3104{
3105 return -EOPNOTSUPP;
3106}
3107static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3108{
3109 return 0;
3110}
3111static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3112{
3113 return 0;
3114}
3115#endif
3116
3117#ifdef CONFIG_UPROBES
3118struct bpf_uprobe_multi_link;
3119
3120struct bpf_uprobe {
3121 struct bpf_uprobe_multi_link *link;
3122 loff_t offset;
3123 unsigned long ref_ctr_offset;
3124 u64 cookie;
3125 struct uprobe_consumer consumer;
3126};
3127
3128struct bpf_uprobe_multi_link {
3129 struct path path;
3130 struct bpf_link link;
3131 u32 cnt;
3132 u32 flags;
3133 struct bpf_uprobe *uprobes;
3134 struct task_struct *task;
3135};
3136
3137struct bpf_uprobe_multi_run_ctx {
3138 struct bpf_run_ctx run_ctx;
3139 unsigned long entry_ip;
3140 struct bpf_uprobe *uprobe;
3141};
3142
3143static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
3144 u32 cnt)
3145{
3146 u32 i;
3147
3148 for (i = 0; i < cnt; i++) {
3149 uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
3150 &uprobes[i].consumer);
3151 }
3152}
3153
3154static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3155{
3156 struct bpf_uprobe_multi_link *umulti_link;
3157
3158 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3159 bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
3160 if (umulti_link->task)
3161 put_task_struct(umulti_link->task);
3162 path_put(&umulti_link->path);
3163}
3164
3165static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3166{
3167 struct bpf_uprobe_multi_link *umulti_link;
3168
3169 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3170 kvfree(umulti_link->uprobes);
3171 kfree(umulti_link);
3172}
3173
3174static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3175 struct bpf_link_info *info)
3176{
3177 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3178 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3179 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3180 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3181 u32 upath_size = info->uprobe_multi.path_size;
3182 struct bpf_uprobe_multi_link *umulti_link;
3183 u32 ucount = info->uprobe_multi.count;
3184 int err = 0, i;
3185 long left;
3186
3187 if (!upath ^ !upath_size)
3188 return -EINVAL;
3189
3190 if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3191 return -EINVAL;
3192
3193 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3194 info->uprobe_multi.count = umulti_link->cnt;
3195 info->uprobe_multi.flags = umulti_link->flags;
3196 info->uprobe_multi.pid = umulti_link->task ?
3197 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3198
3199 if (upath) {
3200 char *p, *buf;
3201
3202 upath_size = min_t(u32, upath_size, PATH_MAX);
3203
3204 buf = kmalloc(upath_size, GFP_KERNEL);
3205 if (!buf)
3206 return -ENOMEM;
3207 p = d_path(&umulti_link->path, buf, upath_size);
3208 if (IS_ERR(p)) {
3209 kfree(buf);
3210 return PTR_ERR(p);
3211 }
3212 upath_size = buf + upath_size - p;
3213 left = copy_to_user(upath, p, upath_size);
3214 kfree(buf);
3215 if (left)
3216 return -EFAULT;
3217 info->uprobe_multi.path_size = upath_size;
3218 }
3219
3220 if (!uoffsets && !ucookies && !uref_ctr_offsets)
3221 return 0;
3222
3223 if (ucount < umulti_link->cnt)
3224 err = -ENOSPC;
3225 else
3226 ucount = umulti_link->cnt;
3227
3228 for (i = 0; i < ucount; i++) {
3229 if (uoffsets &&
3230 put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3231 return -EFAULT;
3232 if (uref_ctr_offsets &&
3233 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3234 return -EFAULT;
3235 if (ucookies &&
3236 put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3237 return -EFAULT;
3238 }
3239
3240 return err;
3241}
3242
3243static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3244 .release = bpf_uprobe_multi_link_release,
3245 .dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3246 .fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3247};
3248
3249static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3250 unsigned long entry_ip,
3251 struct pt_regs *regs)
3252{
3253 struct bpf_uprobe_multi_link *link = uprobe->link;
3254 struct bpf_uprobe_multi_run_ctx run_ctx = {
3255 .entry_ip = entry_ip,
3256 .uprobe = uprobe,
3257 };
3258 struct bpf_prog *prog = link->link.prog;
3259 bool sleepable = prog->sleepable;
3260 struct bpf_run_ctx *old_run_ctx;
3261 int err = 0;
3262
3263 if (link->task && current != link->task)
3264 return 0;
3265
3266 if (sleepable)
3267 rcu_read_lock_trace();
3268 else
3269 rcu_read_lock();
3270
3271 migrate_disable();
3272
3273 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
3274 err = bpf_prog_run(link->link.prog, regs);
3275 bpf_reset_run_ctx(old_run_ctx);
3276
3277 migrate_enable();
3278
3279 if (sleepable)
3280 rcu_read_unlock_trace();
3281 else
3282 rcu_read_unlock();
3283 return err;
3284}
3285
3286static bool
3287uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
3288 struct mm_struct *mm)
3289{
3290 struct bpf_uprobe *uprobe;
3291
3292 uprobe = container_of(con, struct bpf_uprobe, consumer);
3293 return uprobe->link->task->mm == mm;
3294}
3295
3296static int
3297uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
3298{
3299 struct bpf_uprobe *uprobe;
3300
3301 uprobe = container_of(con, struct bpf_uprobe, consumer);
3302 return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
3303}
3304
3305static int
3306uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
3307{
3308 struct bpf_uprobe *uprobe;
3309
3310 uprobe = container_of(con, struct bpf_uprobe, consumer);
3311 return uprobe_prog_run(uprobe, func, regs);
3312}
3313
3314static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3315{
3316 struct bpf_uprobe_multi_run_ctx *run_ctx;
3317
3318 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3319 return run_ctx->entry_ip;
3320}
3321
3322static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3323{
3324 struct bpf_uprobe_multi_run_ctx *run_ctx;
3325
3326 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3327 return run_ctx->uprobe->cookie;
3328}
3329
3330int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3331{
3332 struct bpf_uprobe_multi_link *link = NULL;
3333 unsigned long __user *uref_ctr_offsets;
3334 struct bpf_link_primer link_primer;
3335 struct bpf_uprobe *uprobes = NULL;
3336 struct task_struct *task = NULL;
3337 unsigned long __user *uoffsets;
3338 u64 __user *ucookies;
3339 void __user *upath;
3340 u32 flags, cnt, i;
3341 struct path path;
3342 char *name;
3343 pid_t pid;
3344 int err;
3345
3346 /* no support for 32bit archs yet */
3347 if (sizeof(u64) != sizeof(void *))
3348 return -EOPNOTSUPP;
3349
3350 if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
3351 return -EINVAL;
3352
3353 flags = attr->link_create.uprobe_multi.flags;
3354 if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3355 return -EINVAL;
3356
3357 /*
3358 * path, offsets and cnt are mandatory,
3359 * ref_ctr_offsets and cookies are optional
3360 */
3361 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3362 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3363 cnt = attr->link_create.uprobe_multi.cnt;
3364
3365 if (!upath || !uoffsets || !cnt)
3366 return -EINVAL;
3367 if (cnt > MAX_UPROBE_MULTI_CNT)
3368 return -E2BIG;
3369
3370 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3371 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3372
3373 name = strndup_user(upath, PATH_MAX);
3374 if (IS_ERR(name)) {
3375 err = PTR_ERR(name);
3376 return err;
3377 }
3378
3379 err = kern_path(name, LOOKUP_FOLLOW, &path);
3380 kfree(name);
3381 if (err)
3382 return err;
3383
3384 if (!d_is_reg(path.dentry)) {
3385 err = -EBADF;
3386 goto error_path_put;
3387 }
3388
3389 pid = attr->link_create.uprobe_multi.pid;
3390 if (pid) {
3391 rcu_read_lock();
3392 task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3393 rcu_read_unlock();
3394 if (!task) {
3395 err = -ESRCH;
3396 goto error_path_put;
3397 }
3398 }
3399
3400 err = -ENOMEM;
3401
3402 link = kzalloc(sizeof(*link), GFP_KERNEL);
3403 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3404
3405 if (!uprobes || !link)
3406 goto error_free;
3407
3408 for (i = 0; i < cnt; i++) {
3409 if (__get_user(uprobes[i].offset, uoffsets + i)) {
3410 err = -EFAULT;
3411 goto error_free;
3412 }
3413 if (uprobes[i].offset < 0) {
3414 err = -EINVAL;
3415 goto error_free;
3416 }
3417 if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3418 err = -EFAULT;
3419 goto error_free;
3420 }
3421 if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3422 err = -EFAULT;
3423 goto error_free;
3424 }
3425
3426 uprobes[i].link = link;
3427
3428 if (flags & BPF_F_UPROBE_MULTI_RETURN)
3429 uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3430 else
3431 uprobes[i].consumer.handler = uprobe_multi_link_handler;
3432
3433 if (pid)
3434 uprobes[i].consumer.filter = uprobe_multi_link_filter;
3435 }
3436
3437 link->cnt = cnt;
3438 link->uprobes = uprobes;
3439 link->path = path;
3440 link->task = task;
3441 link->flags = flags;
3442
3443 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3444 &bpf_uprobe_multi_link_lops, prog);
3445
3446 for (i = 0; i < cnt; i++) {
3447 err = uprobe_register_refctr(d_real_inode(link->path.dentry),
3448 uprobes[i].offset,
3449 uprobes[i].ref_ctr_offset,
3450 &uprobes[i].consumer);
3451 if (err) {
3452 bpf_uprobe_unregister(&path, uprobes, i);
3453 goto error_free;
3454 }
3455 }
3456
3457 err = bpf_link_prime(&link->link, &link_primer);
3458 if (err)
3459 goto error_free;
3460
3461 return bpf_link_settle(&link_primer);
3462
3463error_free:
3464 kvfree(uprobes);
3465 kfree(link);
3466 if (task)
3467 put_task_struct(task);
3468error_path_put:
3469 path_put(&path);
3470 return err;
3471}
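
/*
 * A minimal user-space sketch of the attributes consumed above,
 * assuming libbpf's bpf_link_create() wrapper and its uprobe_multi
 * opts (the binary path and offset are illustrative):
 *
 *	unsigned long offsets[] = { 0x1234 };
 *	LIBBPF_OPTS(bpf_link_create_opts, opts,
 *		.uprobe_multi = {
 *			.path	 = "/usr/bin/app",
 *			.offsets = offsets,
 *			.cnt	 = 1,
 *		},
 *	);
 *	int link_fd = bpf_link_create(prog_fd, 0,
 *				      BPF_TRACE_UPROBE_MULTI, &opts);
 *
 * path, offsets and cnt are mandatory here; ref_ctr_offsets, cookies
 * and pid are optional, matching the checks at the top of the
 * function.
 */
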
3472#else /* !CONFIG_UPROBES */
3473int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3474{
3475 return -EOPNOTSUPP;
3476}
3477static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3478{
3479 return 0;
3480}
3481static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3482{
3483 return 0;
3484}
3485#endif /* CONFIG_UPROBES */