kernel/trace/bpf_trace.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/kernel.h>
   6#include <linux/types.h>
   7#include <linux/slab.h>
   8#include <linux/bpf.h>
   9#include <linux/bpf_verifier.h>
  10#include <linux/bpf_perf_event.h>
  11#include <linux/btf.h>
  12#include <linux/filter.h>
  13#include <linux/uaccess.h>
  14#include <linux/ctype.h>
  15#include <linux/kprobes.h>
  16#include <linux/spinlock.h>
  17#include <linux/syscalls.h>
  18#include <linux/error-injection.h>
  19#include <linux/btf_ids.h>
  20#include <linux/bpf_lsm.h>
  21#include <linux/fprobe.h>
  22#include <linux/bsearch.h>
  23#include <linux/sort.h>
  24#include <linux/key.h>
  25#include <linux/verification.h>
  26#include <linux/namei.h>
  27
  28#include <net/bpf_sk_storage.h>
  29
  30#include <uapi/linux/bpf.h>
  31#include <uapi/linux/btf.h>
  32
  33#include <asm/tlb.h>
  34
  35#include "trace_probe.h"
  36#include "trace.h"
  37
  38#define CREATE_TRACE_POINTS
  39#include "bpf_trace.h"
  40
  41#define bpf_event_rcu_dereference(p)					\
  42	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  43
  44#define MAX_UPROBE_MULTI_CNT (1U << 20)
  45#define MAX_KPROBE_MULTI_CNT (1U << 20)
  46
  47#ifdef CONFIG_MODULES
  48struct bpf_trace_module {
  49	struct module *module;
  50	struct list_head list;
  51};
  52
  53static LIST_HEAD(bpf_trace_modules);
  54static DEFINE_MUTEX(bpf_module_mutex);
  55
  56static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  57{
  58	struct bpf_raw_event_map *btp, *ret = NULL;
  59	struct bpf_trace_module *btm;
  60	unsigned int i;
  61
  62	mutex_lock(&bpf_module_mutex);
  63	list_for_each_entry(btm, &bpf_trace_modules, list) {
  64		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  65			btp = &btm->module->bpf_raw_events[i];
  66			if (!strcmp(btp->tp->name, name)) {
  67				if (try_module_get(btm->module))
  68					ret = btp;
  69				goto out;
  70			}
  71		}
  72	}
  73out:
  74	mutex_unlock(&bpf_module_mutex);
  75	return ret;
  76}
  77#else
  78static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  79{
  80	return NULL;
  81}
  82#endif /* CONFIG_MODULES */
  83
  84u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  85u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  86
  87static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
  88				  u64 flags, const struct btf **btf,
  89				  s32 *btf_id);
  90static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
  91static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  92
  93static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
  94static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  95
  96/**
  97 * trace_call_bpf - invoke BPF program
  98 * @call: tracepoint event
  99 * @ctx: opaque context pointer
 100 *
 101 * kprobe handlers execute BPF programs via this helper.
 102 * Can be used from static tracepoints in the future.
 103 *
 104 * Return: BPF programs always return an integer which is interpreted by
 105 * the kprobe handler as:
 106 * 0 - return from kprobe (event is filtered out)
 107 * 1 - store kprobe event into ring buffer
 108 * Other values are reserved and currently alias to 1
 109 */
 110unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 111{
 112	unsigned int ret;
 113
 114	cant_sleep();
 115
 116	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 117		/*
 118		 * since some bpf program is already running on this cpu,
 119		 * don't call into another bpf program (same or different)
 120		 * and don't send kprobe event into ring-buffer,
 121		 * so return zero here
 122		 */
 123		rcu_read_lock();
 124		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
 125		rcu_read_unlock();
 126		ret = 0;
 127		goto out;
 128	}
 129
 130	/*
 131	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
 132	 * to all call sites, we did a bpf_prog_array_valid() there to check
 133	 * whether call->prog_array is empty or not, which is
 134	 * a heuristic to speed up execution.
 135	 *
 136	 * If bpf_prog_array_valid() fetched prog_array was
 137	 * non-NULL, we go into trace_call_bpf() and do the actual
 138	 * proper rcu_dereference() under RCU lock.
 139	 * If it turns out that prog_array is NULL then, we bail out.
 140	 * Conversely, if the bpf_prog_array_valid() fetched pointer
 141	 * was NULL, we skip the prog_array with the risk of missing
 142	 * out on events that were added in between that check and the
 143	 * rcu_dereference(), which is an accepted risk.
 144	 */
 145	rcu_read_lock();
 146	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
 147				 ctx, bpf_prog_run);
 148	rcu_read_unlock();
 149
 150 out:
 151	__this_cpu_dec(bpf_prog_active);
 152
 153	return ret;
 154}
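/*
 * Illustrative sketch (not part of this file): a minimal libbpf-style kprobe
 * program whose return value is consumed by trace_call_bpf() above. Returning
 * 1 stores the kprobe event in the ring buffer, returning 0 filters it out.
 * The attach point and the PID value are arbitrary assumptions.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int filter_openat(struct pt_regs *regs)
 *	{
 *		__u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		return pid == 1234 ? 1 : 0;	// 1: keep event, 0: drop it
 *	}
 */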
 155
 156#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 157BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 158{
 159	regs_set_return_value(regs, rc);
 160	override_function_with_return(regs);
 161	return 0;
 162}
 163
 164static const struct bpf_func_proto bpf_override_return_proto = {
 165	.func		= bpf_override_return,
 166	.gpl_only	= true,
 167	.ret_type	= RET_INTEGER,
 168	.arg1_type	= ARG_PTR_TO_CTX,
 169	.arg2_type	= ARG_ANYTHING,
 170};
 171#endif
 172
 173static __always_inline int
 174bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 175{
 176	int ret;
 177
 178	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
 179	if (unlikely(ret < 0))
 180		memset(dst, 0, size);
 181	return ret;
 182}
 183
 184BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
 185	   const void __user *, unsafe_ptr)
 186{
 187	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
 188}
 189
 190const struct bpf_func_proto bpf_probe_read_user_proto = {
 191	.func		= bpf_probe_read_user,
 192	.gpl_only	= true,
 193	.ret_type	= RET_INTEGER,
 194	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 195	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 196	.arg3_type	= ARG_ANYTHING,
 197};
 198
 199static __always_inline int
 200bpf_probe_read_user_str_common(void *dst, u32 size,
 201			       const void __user *unsafe_ptr)
 202{
 203	int ret;
 204
 205	/*
 206	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
 207	 * terminator into `dst`.
 208	 *
 209	 * strncpy_from_user() does long-sized strides in the fast path. If the
 210	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
 211	 * then there could be junk after the NUL in `dst`. If user takes `dst`
 212	 * and keys a hash map with it, then semantically identical strings can
 213	 * occupy multiple entries in the map.
 214	 */
 215	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
 216	if (unlikely(ret < 0))
 217		memset(dst, 0, size);
 218	return ret;
 219}
 220
 221BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
 222	   const void __user *, unsafe_ptr)
 223{
 224	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
 225}
 226
 227const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 228	.func		= bpf_probe_read_user_str,
 229	.gpl_only	= true,
 230	.ret_type	= RET_INTEGER,
 231	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 232	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 233	.arg3_type	= ARG_ANYTHING,
 234};
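/*
 * Illustrative BPF-side sketch (names are assumptions): typical use of
 * bpf_probe_read_user_str(), relying on the return value rather than the
 * whole buffer, per the NUL-padding note above.
 *
 *	char str[64];
 *	long n;
 *
 *	n = bpf_probe_read_user_str(str, sizeof(str), user_ptr);
 *	if (n < 0)
 *		return 0;	// fault: str was zeroed by the helper
 *	// n counts the trailing NUL; only str[0..n-1] is defined, so key
 *	// maps on that length (or pre-zero the buffer) for stable hashing.
 */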
 235
 236BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
 237	   const void *, unsafe_ptr)
 238{
 239	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 240}
 241
 242const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 243	.func		= bpf_probe_read_kernel,
 244	.gpl_only	= true,
 245	.ret_type	= RET_INTEGER,
 246	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 247	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 248	.arg3_type	= ARG_ANYTHING,
 249};
 250
 251static __always_inline int
 252bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 253{
 254	int ret;
 255
 256	/*
 257	 * The strncpy_from_kernel_nofault() call will likely not fill the
 258	 * entire buffer, but that's okay in this circumstance as we're probing
 259	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
 260	 * as well probe the stack. Thus, memory is explicitly cleared
 261	 * only in error case, so that improper users ignoring return
 262	 * code altogether don't copy garbage; otherwise length of string
 263	 * is returned that can be used for bpf_perf_event_output() et al.
 264	 */
 265	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
 266	if (unlikely(ret < 0))
 267		memset(dst, 0, size);
 268	return ret;
 269}
 270
 271BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
 272	   const void *, unsafe_ptr)
 273{
 274	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 275}
 276
 277const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
 278	.func		= bpf_probe_read_kernel_str,
 279	.gpl_only	= true,
 280	.ret_type	= RET_INTEGER,
 281	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 282	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 283	.arg3_type	= ARG_ANYTHING,
 284};
 285
 286#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 287BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
 288	   const void *, unsafe_ptr)
 289{
 290	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 291		return bpf_probe_read_user_common(dst, size,
 292				(__force void __user *)unsafe_ptr);
 293	}
 294	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 295}
 296
 297static const struct bpf_func_proto bpf_probe_read_compat_proto = {
 298	.func		= bpf_probe_read_compat,
 299	.gpl_only	= true,
 300	.ret_type	= RET_INTEGER,
 301	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 302	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 303	.arg3_type	= ARG_ANYTHING,
 304};
 305
 306BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
 307	   const void *, unsafe_ptr)
 308{
 309	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 310		return bpf_probe_read_user_str_common(dst, size,
 311				(__force void __user *)unsafe_ptr);
 312	}
 313	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 314}
 315
 316static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
 317	.func		= bpf_probe_read_compat_str,
 318	.gpl_only	= true,
 319	.ret_type	= RET_INTEGER,
 320	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 321	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 322	.arg3_type	= ARG_ANYTHING,
 323};
 324#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 325
 326BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
 327	   u32, size)
 328{
 329	/*
 330	 * Ensure we're in user context which is safe for the helper to
 331	 * run. This helper has no business in a kthread.
 332	 *
 333	 * access_ok() should prevent writing to non-user memory, but in
 334	 * some situations (nommu, temporary switch, etc) access_ok() does
 335	 * not provide enough validation, hence the check on KERNEL_DS.
 336	 *
 337	 * nmi_uaccess_okay() ensures the probe is not run in an interim
 338	 * state, when the task or mm are switched. This is specifically
 339	 * required to prevent the use of temporary mm.
 340	 */
 341
 342	if (unlikely(in_interrupt() ||
 343		     current->flags & (PF_KTHREAD | PF_EXITING)))
 344		return -EPERM;
 345	if (unlikely(!nmi_uaccess_okay()))
 346		return -EPERM;
 347
 348	return copy_to_user_nofault(unsafe_ptr, src, size);
 349}
 350
 351static const struct bpf_func_proto bpf_probe_write_user_proto = {
 352	.func		= bpf_probe_write_user,
 353	.gpl_only	= true,
 354	.ret_type	= RET_INTEGER,
 355	.arg1_type	= ARG_ANYTHING,
 356	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 357	.arg3_type	= ARG_CONST_SIZE,
 358};
 359
 360static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 361{
 362	if (!capable(CAP_SYS_ADMIN))
 363		return NULL;
 364
 365	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 366			    current->comm, task_pid_nr(current));
 367
 368	return &bpf_probe_write_user_proto;
 369}
 370
 371#define MAX_TRACE_PRINTK_VARARGS	3
 372#define BPF_TRACE_PRINTK_SIZE		1024
 373
 374BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 375	   u64, arg2, u64, arg3)
 376{
 377	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
 378	struct bpf_bprintf_data data = {
 379		.get_bin_args	= true,
 380		.get_buf	= true,
 381	};
 382	int ret;
 383
 384	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
 385				  MAX_TRACE_PRINTK_VARARGS, &data);
 386	if (ret < 0)
 387		return ret;
 388
 389	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 390
 391	trace_bpf_trace_printk(data.buf);
 392
 393	bpf_bprintf_cleanup(&data);
 394
 395	return ret;
 396}
 397
 398static const struct bpf_func_proto bpf_trace_printk_proto = {
 399	.func		= bpf_trace_printk,
 400	.gpl_only	= true,
 401	.ret_type	= RET_INTEGER,
 402	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 403	.arg2_type	= ARG_CONST_SIZE,
 404};
 405
 406static void __set_printk_clr_event(void)
 407{
 408	/*
 409	 * This program might be calling bpf_trace_printk,
 410	 * so enable the associated bpf_trace/bpf_trace_printk event.
 411	 * Repeat this each time as it is possible a user has
 412	 * disabled bpf_trace_printk events.  By loading a program that
 413	 * calls bpf_trace_printk(), however, the user has expressed
 414	 * the intent to see such events.
 415	 */
 416	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
 417		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 418}
 419
 420const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 421{
 422	__set_printk_clr_event();
 423	return &bpf_trace_printk_proto;
 424}
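/*
 * Illustrative sketch (hypothetical program): bpf_trace_printk() as usually
 * reached from BPF C through libbpf's bpf_printk() wrapper; the output goes
 * to the bpf_trace/bpf_trace_printk event enabled above and shows up in
 * /sys/kernel/tracing/trace_pipe.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int log_execve(void *ctx)
 *	{
 *		__u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		bpf_printk("execve by pid %u", pid);
 *		return 0;
 *	}
 */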
 425
 426BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
 427	   u32, data_len)
 428{
 429	struct bpf_bprintf_data data = {
 430		.get_bin_args	= true,
 431		.get_buf	= true,
 432	};
 433	int ret, num_args;
 434
 435	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 436	    (data_len && !args))
 437		return -EINVAL;
 438	num_args = data_len / 8;
 439
 440	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 441	if (ret < 0)
 442		return ret;
 443
 444	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 445
 446	trace_bpf_trace_printk(data.buf);
 447
 448	bpf_bprintf_cleanup(&data);
 449
 450	return ret;
 451}
 452
 453static const struct bpf_func_proto bpf_trace_vprintk_proto = {
 454	.func		= bpf_trace_vprintk,
 455	.gpl_only	= true,
 456	.ret_type	= RET_INTEGER,
 457	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 458	.arg2_type	= ARG_CONST_SIZE,
 459	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 460	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
 461};
 462
 463const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
 464{
 465	__set_printk_clr_event();
 466	return &bpf_trace_vprintk_proto;
 467}
 468
 469BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 470	   const void *, args, u32, data_len)
 471{
 472	struct bpf_bprintf_data data = {
 473		.get_bin_args	= true,
 474	};
 475	int err, num_args;
 476
 477	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 478	    (data_len && !args))
 479		return -EINVAL;
 480	num_args = data_len / 8;
 481
 482	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 483	if (err < 0)
 484		return err;
 485
 486	seq_bprintf(m, fmt, data.bin_args);
 487
 488	bpf_bprintf_cleanup(&data);
 489
 490	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 491}
 492
 493BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
 494
 495static const struct bpf_func_proto bpf_seq_printf_proto = {
 496	.func		= bpf_seq_printf,
 497	.gpl_only	= true,
 498	.ret_type	= RET_INTEGER,
 499	.arg1_type	= ARG_PTR_TO_BTF_ID,
 500	.arg1_btf_id	= &btf_seq_file_ids[0],
 501	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 502	.arg3_type	= ARG_CONST_SIZE,
 503	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 504	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 505};
 506
 507BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
 508{
 509	return seq_write(m, data, len) ? -EOVERFLOW : 0;
 510}
 511
 512static const struct bpf_func_proto bpf_seq_write_proto = {
 513	.func		= bpf_seq_write,
 514	.gpl_only	= true,
 515	.ret_type	= RET_INTEGER,
 516	.arg1_type	= ARG_PTR_TO_BTF_ID,
 517	.arg1_btf_id	= &btf_seq_file_ids[0],
 518	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 519	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 520};
 521
 522BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
 523	   u32, btf_ptr_size, u64, flags)
 524{
 525	const struct btf *btf;
 526	s32 btf_id;
 527	int ret;
 528
 529	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
 530	if (ret)
 531		return ret;
 532
 533	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
 534}
 535
 536static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
 537	.func		= bpf_seq_printf_btf,
 538	.gpl_only	= true,
 539	.ret_type	= RET_INTEGER,
 540	.arg1_type	= ARG_PTR_TO_BTF_ID,
 541	.arg1_btf_id	= &btf_seq_file_ids[0],
 542	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 543	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 544	.arg4_type	= ARG_ANYTHING,
 545};
 546
 547static __always_inline int
 548get_map_perf_counter(struct bpf_map *map, u64 flags,
 549		     u64 *value, u64 *enabled, u64 *running)
 550{
 551	struct bpf_array *array = container_of(map, struct bpf_array, map);
 552	unsigned int cpu = smp_processor_id();
 553	u64 index = flags & BPF_F_INDEX_MASK;
 554	struct bpf_event_entry *ee;
 555
 556	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 557		return -EINVAL;
 558	if (index == BPF_F_CURRENT_CPU)
 559		index = cpu;
 560	if (unlikely(index >= array->map.max_entries))
 561		return -E2BIG;
 562
 563	ee = READ_ONCE(array->ptrs[index]);
 564	if (!ee)
 565		return -ENOENT;
 566
 567	return perf_event_read_local(ee->event, value, enabled, running);
 568}
 569
 570BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 571{
 572	u64 value = 0;
 573	int err;
 574
 575	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 576	/*
 577	 * this api is ugly since we miss [-22..-2] range of valid
 578	 * counter values, but that's uapi
 579	 */
 580	if (err)
 581		return err;
 582	return value;
 583}
 584
 585static const struct bpf_func_proto bpf_perf_event_read_proto = {
 586	.func		= bpf_perf_event_read,
 587	.gpl_only	= true,
 588	.ret_type	= RET_INTEGER,
 589	.arg1_type	= ARG_CONST_MAP_PTR,
 590	.arg2_type	= ARG_ANYTHING,
 591};
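/*
 * Illustrative sketch (map name is an assumption): reading a counter from a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY slot for the current CPU. Per the comment in
 * bpf_perf_event_read() above, small negative counter values cannot be told
 * apart from errors, which is what bpf_perf_event_read_value() fixes.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} counters SEC(".maps");
 *
 *	// inside a tracing program:
 *	__u64 cycles = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 */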
 592
 593BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 594	   struct bpf_perf_event_value *, buf, u32, size)
 595{
 596	int err = -EINVAL;
 597
 598	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 599		goto clear;
 600	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 601				   &buf->running);
 602	if (unlikely(err))
 603		goto clear;
 604	return 0;
 605clear:
 606	memset(buf, 0, size);
 607	return err;
 608}
 609
 610static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 611	.func		= bpf_perf_event_read_value,
 612	.gpl_only	= true,
 613	.ret_type	= RET_INTEGER,
 614	.arg1_type	= ARG_CONST_MAP_PTR,
 615	.arg2_type	= ARG_ANYTHING,
 616	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
 617	.arg4_type	= ARG_CONST_SIZE,
 618};
 619
 620static __always_inline u64
 621__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 622			u64 flags, struct perf_raw_record *raw,
 623			struct perf_sample_data *sd)
 624{
 625	struct bpf_array *array = container_of(map, struct bpf_array, map);
 626	unsigned int cpu = smp_processor_id();
 627	u64 index = flags & BPF_F_INDEX_MASK;
 628	struct bpf_event_entry *ee;
 629	struct perf_event *event;
 630
 631	if (index == BPF_F_CURRENT_CPU)
 632		index = cpu;
 633	if (unlikely(index >= array->map.max_entries))
 634		return -E2BIG;
 635
 636	ee = READ_ONCE(array->ptrs[index]);
 637	if (!ee)
 638		return -ENOENT;
 639
 640	event = ee->event;
 641	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 642		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 643		return -EINVAL;
 644
 645	if (unlikely(event->oncpu != cpu))
 646		return -EOPNOTSUPP;
 647
 648	perf_sample_save_raw_data(sd, event, raw);
 649
 650	return perf_event_output(event, sd, regs);
 651}
 652
 653/*
 654 * Support executing tracepoints in normal, irq, and nmi context that each call
 655 * bpf_perf_event_output
 656 */
 657struct bpf_trace_sample_data {
 658	struct perf_sample_data sds[3];
 659};
 660
 661static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
 662static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 663BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 664	   u64, flags, void *, data, u64, size)
 665{
 666	struct bpf_trace_sample_data *sds;
 667	struct perf_raw_record raw = {
 668		.frag = {
 669			.size = size,
 670			.data = data,
 671		},
 672	};
 673	struct perf_sample_data *sd;
 674	int nest_level, err;
 675
 676	preempt_disable();
 677	sds = this_cpu_ptr(&bpf_trace_sds);
 678	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 679
 680	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 681		err = -EBUSY;
 682		goto out;
 683	}
 684
 685	sd = &sds->sds[nest_level - 1];
 686
 687	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
 688		err = -EINVAL;
 689		goto out;
 690	}
 691
 692	perf_sample_data_init(sd, 0, 0);
 693
 694	err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
 695out:
 696	this_cpu_dec(bpf_trace_nest_level);
 697	preempt_enable();
 698	return err;
 699}
 700
 701static const struct bpf_func_proto bpf_perf_event_output_proto = {
 702	.func		= bpf_perf_event_output,
 703	.gpl_only	= true,
 704	.ret_type	= RET_INTEGER,
 705	.arg1_type	= ARG_PTR_TO_CTX,
 706	.arg2_type	= ARG_CONST_MAP_PTR,
 707	.arg3_type	= ARG_ANYTHING,
 708	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 709	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 710};
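/*
 * Illustrative BPF-side sketch (map and struct names are assumptions): how a
 * program typically feeds this helper, indexing a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY with BPF_F_CURRENT_CPU so that
 * __bpf_perf_event_output() picks the current CPU's perf event.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} events SEC(".maps");
 *
 *	// inside a kprobe program with ctx = struct pt_regs *:
 *	struct { __u32 pid; } e = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *	};
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 */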
 711
 712static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
 713struct bpf_nested_pt_regs {
 714	struct pt_regs regs[3];
 715};
 716static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
 717static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 718
 719u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 720		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 721{
 722	struct perf_raw_frag frag = {
 723		.copy		= ctx_copy,
 724		.size		= ctx_size,
 725		.data		= ctx,
 726	};
 727	struct perf_raw_record raw = {
 728		.frag = {
 729			{
 730				.next	= ctx_size ? &frag : NULL,
 731			},
 732			.size	= meta_size,
 733			.data	= meta,
 734		},
 735	};
 736	struct perf_sample_data *sd;
 737	struct pt_regs *regs;
 738	int nest_level;
 739	u64 ret;
 740
 741	preempt_disable();
 742	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 743
 744	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 745		ret = -EBUSY;
 746		goto out;
 747	}
 748	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
 749	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 750
 751	perf_fetch_caller_regs(regs);
 752	perf_sample_data_init(sd, 0, 0);
 753
 754	ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
 755out:
 756	this_cpu_dec(bpf_event_output_nest_level);
 757	preempt_enable();
 758	return ret;
 759}
 760
 761BPF_CALL_0(bpf_get_current_task)
 762{
 763	return (long) current;
 764}
 765
 766const struct bpf_func_proto bpf_get_current_task_proto = {
 767	.func		= bpf_get_current_task,
 768	.gpl_only	= true,
 769	.ret_type	= RET_INTEGER,
 770};
 771
 772BPF_CALL_0(bpf_get_current_task_btf)
 773{
 774	return (unsigned long) current;
 775}
 776
 777const struct bpf_func_proto bpf_get_current_task_btf_proto = {
 778	.func		= bpf_get_current_task_btf,
 779	.gpl_only	= true,
 780	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
 781	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 782};
 783
 784BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
 785{
 786	return (unsigned long) task_pt_regs(task);
 787}
 788
 789BTF_ID_LIST(bpf_task_pt_regs_ids)
 790BTF_ID(struct, pt_regs)
 791
 792const struct bpf_func_proto bpf_task_pt_regs_proto = {
 793	.func		= bpf_task_pt_regs,
 794	.gpl_only	= true,
 795	.arg1_type	= ARG_PTR_TO_BTF_ID,
 796	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 797	.ret_type	= RET_PTR_TO_BTF_ID,
 798	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
 799};
 800
 801struct send_signal_irq_work {
 802	struct irq_work irq_work;
 803	struct task_struct *task;
 804	u32 sig;
 805	enum pid_type type;
 806	bool has_siginfo;
 807	struct kernel_siginfo info;
 808};
 809
 810static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
 811
 812static void do_bpf_send_signal(struct irq_work *entry)
 813{
 814	struct send_signal_irq_work *work;
 815	struct kernel_siginfo *siginfo;
 816
 817	work = container_of(entry, struct send_signal_irq_work, irq_work);
 818	siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;
 819
 820	group_send_sig_info(work->sig, siginfo, work->task, work->type);
 821	put_task_struct(work->task);
 822}
 823
 824static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
 825{
 826	struct send_signal_irq_work *work = NULL;
 827	struct kernel_siginfo info;
 828	struct kernel_siginfo *siginfo;
 829
 830	if (!task) {
 831		task = current;
 832		siginfo = SEND_SIG_PRIV;
 833	} else {
 834		clear_siginfo(&info);
 835		info.si_signo = sig;
 836		info.si_errno = 0;
 837		info.si_code = SI_KERNEL;
 838		info.si_pid = 0;
 839		info.si_uid = 0;
 840		info.si_value.sival_ptr = (void *)(unsigned long)value;
 841		siginfo = &info;
 842	}
 843
 844	/* Similar to bpf_probe_write_user, the task needs to be
 845	 * in a sound condition and kernel memory access must be
 846	 * permitted in order to send a signal to the target
 847	 * task.
 848	 */
 849	if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
 850		return -EPERM;
 851	if (unlikely(!nmi_uaccess_okay()))
 852		return -EPERM;
 853	/* Task should not be pid=1 to avoid kernel panic. */
 854	if (unlikely(is_global_init(task)))
 855		return -EPERM;
 856
 857	if (!preemptible()) {
 858		/* Do an early check on signal validity. Otherwise,
 859		 * the error is lost in deferred irq_work.
 860		 */
 861		if (unlikely(!valid_signal(sig)))
 862			return -EINVAL;
 863
 864		work = this_cpu_ptr(&send_signal_work);
 865		if (irq_work_is_busy(&work->irq_work))
 866			return -EBUSY;
 867
 868		/* Add the current task, which is the target of sending signal,
 869		 * to the irq_work. The current task may change when queued
 870		 * irq works get executed.
 871		 */
 872		work->task = get_task_struct(task);
 873		work->has_siginfo = siginfo == &info;
 874		if (work->has_siginfo)
 875			copy_siginfo(&work->info, &info);
 876		work->sig = sig;
 877		work->type = type;
 878		irq_work_queue(&work->irq_work);
 879		return 0;
 880	}
 881
 882	return group_send_sig_info(sig, siginfo, task, type);
 883}
 884
 885BPF_CALL_1(bpf_send_signal, u32, sig)
 886{
 887	return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
 888}
 889
 890static const struct bpf_func_proto bpf_send_signal_proto = {
 891	.func		= bpf_send_signal,
 892	.gpl_only	= false,
 893	.ret_type	= RET_INTEGER,
 894	.arg1_type	= ARG_ANYTHING,
 895};
 896
 897BPF_CALL_1(bpf_send_signal_thread, u32, sig)
 898{
 899	return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
 900}
 901
 902static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 903	.func		= bpf_send_signal_thread,
 904	.gpl_only	= false,
 905	.ret_type	= RET_INTEGER,
 906	.arg1_type	= ARG_ANYTHING,
 907};
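/*
 * Illustrative sketch (attach point is an assumption): bpf_send_signal()
 * delivering SIGUSR1 to the task that triggered a perf sample; the deferred
 * irq_work path above covers the non-preemptible case.
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		bpf_send_signal(10);	// SIGUSR1 on most architectures
 *		return 0;
 *	}
 */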
 908
 909BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 910{
 911	struct path copy;
 912	long len;
 913	char *p;
 914
 915	if (!sz)
 916		return 0;
 917
 918	/*
 919	 * The path pointer is verified as trusted and safe to use,
 920	 * but let's double-check that it's valid anyway to work around
 921	 * a potentially broken verifier.
 922	 */
 923	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
 924	if (len < 0)
 925		return len;
 926
 927	p = d_path(&copy, buf, sz);
 928	if (IS_ERR(p)) {
 929		len = PTR_ERR(p);
 930	} else {
 931		len = buf + sz - p;
 932		memmove(buf, p, len);
 933	}
 934
 935	return len;
 936}
 937
 938BTF_SET_START(btf_allowlist_d_path)
 939#ifdef CONFIG_SECURITY
 940BTF_ID(func, security_file_permission)
 941BTF_ID(func, security_inode_getattr)
 942BTF_ID(func, security_file_open)
 943#endif
 944#ifdef CONFIG_SECURITY_PATH
 945BTF_ID(func, security_path_truncate)
 946#endif
 947BTF_ID(func, vfs_truncate)
 948BTF_ID(func, vfs_fallocate)
 949BTF_ID(func, dentry_open)
 950BTF_ID(func, vfs_getattr)
 951BTF_ID(func, filp_close)
 952BTF_SET_END(btf_allowlist_d_path)
 953
 954static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 955{
 956	if (prog->type == BPF_PROG_TYPE_TRACING &&
 957	    prog->expected_attach_type == BPF_TRACE_ITER)
 958		return true;
 959
 960	if (prog->type == BPF_PROG_TYPE_LSM)
 961		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
 962
 963	return btf_id_set_contains(&btf_allowlist_d_path,
 964				   prog->aux->attach_btf_id);
 965}
 966
 967BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
 968
 969static const struct bpf_func_proto bpf_d_path_proto = {
 970	.func		= bpf_d_path,
 971	.gpl_only	= false,
 972	.ret_type	= RET_INTEGER,
 973	.arg1_type	= ARG_PTR_TO_BTF_ID,
 974	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
 975	.arg2_type	= ARG_PTR_TO_MEM,
 976	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 977	.allowed	= bpf_d_path_allowed,
 978};
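/*
 * Illustrative sketch (program name is an assumption): bpf_d_path() from an
 * allowlisted hook; vfs_truncate is in btf_allowlist_d_path above, so
 * bpf_d_path_allowed() accepts an fentry program attached there.
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(trace_truncate, const struct path *path)
 *	{
 *		char buf[256];
 *		long n = bpf_d_path((struct path *)path, buf, sizeof(buf));
 *
 *		if (n > 0)
 *			bpf_printk("truncate: %s", buf);
 *		return 0;
 *	}
 */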
 979
 980#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
 981			 BTF_F_PTR_RAW | BTF_F_ZERO)
 982
 983static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
 984				  u64 flags, const struct btf **btf,
 985				  s32 *btf_id)
 986{
 987	const struct btf_type *t;
 988
 989	if (unlikely(flags & ~(BTF_F_ALL)))
 990		return -EINVAL;
 991
 992	if (btf_ptr_size != sizeof(struct btf_ptr))
 993		return -EINVAL;
 994
 995	*btf = bpf_get_btf_vmlinux();
 996
 997	if (IS_ERR_OR_NULL(*btf))
 998		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
 999
1000	if (ptr->type_id > 0)
1001		*btf_id = ptr->type_id;
1002	else
1003		return -EINVAL;
1004
1005	if (*btf_id > 0)
1006		t = btf_type_by_id(*btf, *btf_id);
1007	if (*btf_id <= 0 || !t)
1008		return -ENOENT;
1009
1010	return 0;
1011}
1012
1013BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1014	   u32, btf_ptr_size, u64, flags)
1015{
1016	const struct btf *btf;
1017	s32 btf_id;
1018	int ret;
1019
1020	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1021	if (ret)
1022		return ret;
1023
1024	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1025				      flags);
1026}
1027
1028const struct bpf_func_proto bpf_snprintf_btf_proto = {
1029	.func		= bpf_snprintf_btf,
1030	.gpl_only	= false,
1031	.ret_type	= RET_INTEGER,
1032	.arg1_type	= ARG_PTR_TO_MEM,
1033	.arg2_type	= ARG_CONST_SIZE,
1034	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1035	.arg4_type	= ARG_CONST_SIZE,
1036	.arg5_type	= ARG_ANYTHING,
1037};
1038
1039BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1040{
1041	/* This helper call is inlined by verifier. */
1042	return ((u64 *)ctx)[-2];
1043}
1044
1045static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1046	.func		= bpf_get_func_ip_tracing,
1047	.gpl_only	= true,
1048	.ret_type	= RET_INTEGER,
1049	.arg1_type	= ARG_PTR_TO_CTX,
1050};
1051
1052#ifdef CONFIG_X86_KERNEL_IBT
1053static unsigned long get_entry_ip(unsigned long fentry_ip)
1054{
1055	u32 instr;
1056
1057	/* We want to be extra safe in case entry ip is on the page edge,
1058	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
1059	 */
1060	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
1061		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
1062			return fentry_ip;
1063	} else {
1064		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
1065	}
1066	if (is_endbr(instr))
1067		fentry_ip -= ENDBR_INSN_SIZE;
1068	return fentry_ip;
1069}
1070#else
1071#define get_entry_ip(fentry_ip) fentry_ip
1072#endif
1073
1074BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1075{
1076	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1077	struct kprobe *kp;
1078
1079#ifdef CONFIG_UPROBES
1080	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1081	if (run_ctx->is_uprobe)
1082		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1083#endif
1084
1085	kp = kprobe_running();
1086
1087	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1088		return 0;
1089
1090	return get_entry_ip((uintptr_t)kp->addr);
1091}
1092
1093static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1094	.func		= bpf_get_func_ip_kprobe,
1095	.gpl_only	= true,
1096	.ret_type	= RET_INTEGER,
1097	.arg1_type	= ARG_PTR_TO_CTX,
1098};
1099
1100BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1101{
1102	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1103}
1104
1105static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1106	.func		= bpf_get_func_ip_kprobe_multi,
1107	.gpl_only	= false,
1108	.ret_type	= RET_INTEGER,
1109	.arg1_type	= ARG_PTR_TO_CTX,
1110};
1111
1112BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1113{
1114	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1115}
1116
1117static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1118	.func		= bpf_get_attach_cookie_kprobe_multi,
1119	.gpl_only	= false,
1120	.ret_type	= RET_INTEGER,
1121	.arg1_type	= ARG_PTR_TO_CTX,
1122};
1123
1124BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1125{
1126	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1127}
1128
1129static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1130	.func		= bpf_get_func_ip_uprobe_multi,
1131	.gpl_only	= false,
1132	.ret_type	= RET_INTEGER,
1133	.arg1_type	= ARG_PTR_TO_CTX,
1134};
1135
1136BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1137{
1138	return bpf_uprobe_multi_cookie(current->bpf_ctx);
1139}
1140
1141static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1142	.func		= bpf_get_attach_cookie_uprobe_multi,
1143	.gpl_only	= false,
1144	.ret_type	= RET_INTEGER,
1145	.arg1_type	= ARG_PTR_TO_CTX,
1146};
1147
1148BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1149{
1150	struct bpf_trace_run_ctx *run_ctx;
1151
1152	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1153	return run_ctx->bpf_cookie;
1154}
1155
1156static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1157	.func		= bpf_get_attach_cookie_trace,
1158	.gpl_only	= false,
1159	.ret_type	= RET_INTEGER,
1160	.arg1_type	= ARG_PTR_TO_CTX,
1161};
1162
1163BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1164{
1165	return ctx->event->bpf_cookie;
1166}
1167
1168static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1169	.func		= bpf_get_attach_cookie_pe,
1170	.gpl_only	= false,
1171	.ret_type	= RET_INTEGER,
1172	.arg1_type	= ARG_PTR_TO_CTX,
1173};
1174
1175BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1176{
1177	struct bpf_trace_run_ctx *run_ctx;
1178
1179	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1180	return run_ctx->bpf_cookie;
1181}
1182
1183static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1184	.func		= bpf_get_attach_cookie_tracing,
1185	.gpl_only	= false,
1186	.ret_type	= RET_INTEGER,
1187	.arg1_type	= ARG_PTR_TO_CTX,
1188};
1189
1190BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1191{
1192	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1193	u32 entry_cnt = size / br_entry_size;
1194
1195	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1196
1197	if (unlikely(flags))
1198		return -EINVAL;
1199
1200	if (!entry_cnt)
1201		return -ENOENT;
1202
1203	return entry_cnt * br_entry_size;
1204}
1205
1206static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1207	.func		= bpf_get_branch_snapshot,
1208	.gpl_only	= true,
1209	.ret_type	= RET_INTEGER,
1210	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1211	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1212};
1213
1214BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1215{
1216	/* This helper call is inlined by verifier. */
1217	u64 nr_args = ((u64 *)ctx)[-1];
1218
1219	if ((u64) n >= nr_args)
1220		return -EINVAL;
1221	*value = ((u64 *)ctx)[n];
1222	return 0;
1223}
1224
1225static const struct bpf_func_proto bpf_get_func_arg_proto = {
1226	.func		= get_func_arg,
1227	.ret_type	= RET_INTEGER,
1228	.arg1_type	= ARG_PTR_TO_CTX,
1229	.arg2_type	= ARG_ANYTHING,
1230	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1231	.arg3_size	= sizeof(u64),
1232};
1233
1234BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1235{
1236	/* This helper call is inlined by verifier. */
1237	u64 nr_args = ((u64 *)ctx)[-1];
1238
1239	*value = ((u64 *)ctx)[nr_args];
1240	return 0;
1241}
1242
1243static const struct bpf_func_proto bpf_get_func_ret_proto = {
1244	.func		= get_func_ret,
1245	.ret_type	= RET_INTEGER,
1246	.arg1_type	= ARG_PTR_TO_CTX,
1247	.arg2_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1248	.arg2_size	= sizeof(u64),
1249};
1250
1251BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1252{
1253	/* This helper call is inlined by verifier. */
1254	return ((u64 *)ctx)[-1];
1255}
1256
1257static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1258	.func		= get_func_arg_cnt,
1259	.ret_type	= RET_INTEGER,
1260	.arg1_type	= ARG_PTR_TO_CTX,
1261};
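/*
 * Illustrative fexit sketch (attach point is an assumption): the three
 * helpers above expose the tracing ctx layout, where ((u64 *)ctx)[-1] is the
 * argument count, [0..nr_args-1] are the arguments and [nr_args] is the
 * return value.
 *
 *	SEC("fexit/kernel_clone")
 *	int BPF_PROG(after_clone)
 *	{
 *		__u64 nr = bpf_get_func_arg_cnt(ctx);
 *		__u64 arg0 = 0, ret = 0;
 *
 *		bpf_get_func_arg(ctx, 0, &arg0);
 *		bpf_get_func_ret(ctx, &ret);
 *		bpf_printk("%llu args, ret %llu", nr, ret);
 *		return 0;
 *	}
 */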
1262
1263#ifdef CONFIG_KEYS
1264__bpf_kfunc_start_defs();
1265
1266/**
1267 * bpf_lookup_user_key - lookup a key by its serial
1268 * @serial: key handle serial number
1269 * @flags: lookup-specific flags
1270 *
1271 * Search a key with a given *serial* and the provided *flags*.
1272 * If found, increment the reference count of the key by one, and
1273 * return it in the bpf_key structure.
1274 *
1275 * The bpf_key structure must be passed to bpf_key_put() when done
1276 * with it, so that the key reference count is decremented and the
1277 * bpf_key structure is freed.
1278 *
1279 * Permission checks are deferred to the time the key is used by
1280 * one of the available key-specific kfuncs.
1281 *
1282 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1283 * special keyring (e.g. session keyring), if it doesn't yet exist.
1284 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1285 * for the key construction, and to retrieve uninstantiated keys (keys
1286 * without data attached to them).
1287 *
1288 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1289 *         NULL pointer otherwise.
1290 */
1291__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1292{
1293	key_ref_t key_ref;
1294	struct bpf_key *bkey;
1295
1296	if (flags & ~KEY_LOOKUP_ALL)
1297		return NULL;
1298
1299	/*
1300	 * Permission check is deferred until the key is used, as the
1301	 * intent of the caller is unknown here.
1302	 */
1303	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1304	if (IS_ERR(key_ref))
1305		return NULL;
1306
1307	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1308	if (!bkey) {
1309		key_put(key_ref_to_ptr(key_ref));
1310		return NULL;
1311	}
1312
1313	bkey->key = key_ref_to_ptr(key_ref);
1314	bkey->has_ref = true;
1315
1316	return bkey;
1317}
1318
1319/**
1320 * bpf_lookup_system_key - lookup a key by a system-defined ID
1321 * @id: key ID
1322 *
1323 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1324 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1325 * attempting to decrement the key reference count on that pointer. The key
1326 * pointer set in such way is currently understood only by
1327 * verify_pkcs7_signature().
1328 *
1329 * Set *id* to one of the values defined in include/linux/verification.h:
1330 * 0 for the primary keyring (immutable keyring of system keys);
1331 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1332 * (where keys can be added only if they are vouched for by existing keys
1333 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1334 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1335 * kernel image and, possibly, the initramfs signature).
1336 *
1337 * Return: a bpf_key pointer with an invalid key pointer set from the
1338 *         pre-determined ID on success, a NULL pointer otherwise
1339 */
1340__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1341{
1342	struct bpf_key *bkey;
1343
1344	if (system_keyring_id_check(id) < 0)
1345		return NULL;
1346
1347	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1348	if (!bkey)
1349		return NULL;
1350
1351	bkey->key = (struct key *)(unsigned long)id;
1352	bkey->has_ref = false;
1353
1354	return bkey;
1355}
1356
1357/**
1358 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1359 * @bkey: bpf_key structure
1360 *
1361 * Decrement the reference count of the key inside *bkey*, if the pointer
1362 * is valid, and free *bkey*.
1363 */
1364__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1365{
1366	if (bkey->has_ref)
1367		key_put(bkey->key);
1368
1369	kfree(bkey);
1370}
1371
1372#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1373/**
1374 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1375 * @data_p: data to verify
1376 * @sig_p: signature of the data
1377 * @trusted_keyring: keyring with keys trusted for signature verification
1378 *
1379 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
1380 * with keys in a keyring referenced by *trusted_keyring*.
1381 *
1382 * Return: 0 on success, a negative value on error.
1383 */
1384__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
1385			       struct bpf_dynptr *sig_p,
1386			       struct bpf_key *trusted_keyring)
1387{
1388	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
1389	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
1390	const void *data, *sig;
1391	u32 data_len, sig_len;
1392	int ret;
1393
1394	if (trusted_keyring->has_ref) {
1395		/*
1396		 * Do the permission check deferred in bpf_lookup_user_key().
1397		 * See bpf_lookup_user_key() for more details.
1398		 *
1399		 * A call to key_task_permission() here would be redundant, as
1400		 * it is already done by keyring_search() called by
1401		 * find_asymmetric_key().
1402		 */
1403		ret = key_validate(trusted_keyring->key);
1404		if (ret < 0)
1405			return ret;
1406	}
1407
1408	data_len = __bpf_dynptr_size(data_ptr);
1409	data = __bpf_dynptr_data(data_ptr, data_len);
1410	sig_len = __bpf_dynptr_size(sig_ptr);
1411	sig = __bpf_dynptr_data(sig_ptr, sig_len);
1412
1413	return verify_pkcs7_signature(data, data_len, sig, sig_len,
1414				      trusted_keyring->key,
1415				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1416				      NULL);
1417}
1418#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1419
1420__bpf_kfunc_end_defs();
1421
1422BTF_KFUNCS_START(key_sig_kfunc_set)
1423BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1424BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1425BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1426#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1427BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1428#endif
1429BTF_KFUNCS_END(key_sig_kfunc_set)
1430
1431static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1432	.owner = THIS_MODULE,
1433	.set = &key_sig_kfunc_set,
1434};
1435
1436static int __init bpf_key_sig_kfuncs_init(void)
1437{
1438	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1439					 &bpf_key_sig_kfunc_set);
1440}
1441
1442late_initcall(bpf_key_sig_kfuncs_init);
1443#endif /* CONFIG_KEYS */
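/*
 * Illustrative sketch (keyring serial, hook and buffers are assumptions): the
 * key/signature kfuncs above as called from a sleepable BPF LSM program, with
 * the acquire/release pairing that bpf_key_put() expects.
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *	extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data,
 *					      struct bpf_dynptr *sig,
 *					      struct bpf_key *keyring) __ksym;
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check_sig, int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		struct bpf_dynptr data_ptr, sig_ptr;	// filled elsewhere, e.g.
 *							// via bpf_dynptr_from_mem()
 *		struct bpf_key *keyring;
 *		int ret;
 *
 *		keyring = bpf_lookup_user_key(0x12345678, 0);
 *		if (!keyring)
 *			return 0;
 *		ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, keyring);
 *		bpf_key_put(keyring);
 *		return ret;	// negative return denies the LSM hook
 *	}
 */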
1444
1445static const struct bpf_func_proto *
1446bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1447{
1448	switch (func_id) {
1449	case BPF_FUNC_map_lookup_elem:
1450		return &bpf_map_lookup_elem_proto;
1451	case BPF_FUNC_map_update_elem:
1452		return &bpf_map_update_elem_proto;
1453	case BPF_FUNC_map_delete_elem:
1454		return &bpf_map_delete_elem_proto;
1455	case BPF_FUNC_map_push_elem:
1456		return &bpf_map_push_elem_proto;
1457	case BPF_FUNC_map_pop_elem:
1458		return &bpf_map_pop_elem_proto;
1459	case BPF_FUNC_map_peek_elem:
1460		return &bpf_map_peek_elem_proto;
1461	case BPF_FUNC_map_lookup_percpu_elem:
1462		return &bpf_map_lookup_percpu_elem_proto;
1463	case BPF_FUNC_ktime_get_ns:
1464		return &bpf_ktime_get_ns_proto;
1465	case BPF_FUNC_ktime_get_boot_ns:
1466		return &bpf_ktime_get_boot_ns_proto;
1467	case BPF_FUNC_tail_call:
1468		return &bpf_tail_call_proto;
1469	case BPF_FUNC_get_current_task:
1470		return &bpf_get_current_task_proto;
1471	case BPF_FUNC_get_current_task_btf:
1472		return &bpf_get_current_task_btf_proto;
1473	case BPF_FUNC_task_pt_regs:
1474		return &bpf_task_pt_regs_proto;
1475	case BPF_FUNC_get_current_uid_gid:
1476		return &bpf_get_current_uid_gid_proto;
1477	case BPF_FUNC_get_current_comm:
1478		return &bpf_get_current_comm_proto;
1479	case BPF_FUNC_trace_printk:
1480		return bpf_get_trace_printk_proto();
1481	case BPF_FUNC_get_smp_processor_id:
1482		return &bpf_get_smp_processor_id_proto;
1483	case BPF_FUNC_get_numa_node_id:
1484		return &bpf_get_numa_node_id_proto;
1485	case BPF_FUNC_perf_event_read:
1486		return &bpf_perf_event_read_proto;
1487	case BPF_FUNC_get_prandom_u32:
1488		return &bpf_get_prandom_u32_proto;
1489	case BPF_FUNC_probe_write_user:
1490		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1491		       NULL : bpf_get_probe_write_proto();
1492	case BPF_FUNC_probe_read_user:
1493		return &bpf_probe_read_user_proto;
1494	case BPF_FUNC_probe_read_kernel:
1495		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1496		       NULL : &bpf_probe_read_kernel_proto;
1497	case BPF_FUNC_probe_read_user_str:
1498		return &bpf_probe_read_user_str_proto;
1499	case BPF_FUNC_probe_read_kernel_str:
1500		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1501		       NULL : &bpf_probe_read_kernel_str_proto;
1502#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1503	case BPF_FUNC_probe_read:
1504		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1505		       NULL : &bpf_probe_read_compat_proto;
1506	case BPF_FUNC_probe_read_str:
1507		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1508		       NULL : &bpf_probe_read_compat_str_proto;
1509#endif
1510#ifdef CONFIG_CGROUPS
1511	case BPF_FUNC_cgrp_storage_get:
1512		return &bpf_cgrp_storage_get_proto;
1513	case BPF_FUNC_cgrp_storage_delete:
1514		return &bpf_cgrp_storage_delete_proto;
1515	case BPF_FUNC_current_task_under_cgroup:
1516		return &bpf_current_task_under_cgroup_proto;
1517#endif
1518	case BPF_FUNC_send_signal:
1519		return &bpf_send_signal_proto;
1520	case BPF_FUNC_send_signal_thread:
1521		return &bpf_send_signal_thread_proto;
1522	case BPF_FUNC_perf_event_read_value:
1523		return &bpf_perf_event_read_value_proto;
1524	case BPF_FUNC_ringbuf_output:
1525		return &bpf_ringbuf_output_proto;
1526	case BPF_FUNC_ringbuf_reserve:
1527		return &bpf_ringbuf_reserve_proto;
1528	case BPF_FUNC_ringbuf_submit:
1529		return &bpf_ringbuf_submit_proto;
1530	case BPF_FUNC_ringbuf_discard:
1531		return &bpf_ringbuf_discard_proto;
1532	case BPF_FUNC_ringbuf_query:
1533		return &bpf_ringbuf_query_proto;
1534	case BPF_FUNC_jiffies64:
1535		return &bpf_jiffies64_proto;
1536	case BPF_FUNC_get_task_stack:
1537		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
1538				       : &bpf_get_task_stack_proto;
1539	case BPF_FUNC_copy_from_user:
1540		return &bpf_copy_from_user_proto;
1541	case BPF_FUNC_copy_from_user_task:
1542		return &bpf_copy_from_user_task_proto;
1543	case BPF_FUNC_snprintf_btf:
1544		return &bpf_snprintf_btf_proto;
1545	case BPF_FUNC_per_cpu_ptr:
1546		return &bpf_per_cpu_ptr_proto;
1547	case BPF_FUNC_this_cpu_ptr:
1548		return &bpf_this_cpu_ptr_proto;
1549	case BPF_FUNC_task_storage_get:
1550		if (bpf_prog_check_recur(prog))
1551			return &bpf_task_storage_get_recur_proto;
1552		return &bpf_task_storage_get_proto;
1553	case BPF_FUNC_task_storage_delete:
1554		if (bpf_prog_check_recur(prog))
1555			return &bpf_task_storage_delete_recur_proto;
1556		return &bpf_task_storage_delete_proto;
1557	case BPF_FUNC_for_each_map_elem:
1558		return &bpf_for_each_map_elem_proto;
1559	case BPF_FUNC_snprintf:
1560		return &bpf_snprintf_proto;
1561	case BPF_FUNC_get_func_ip:
1562		return &bpf_get_func_ip_proto_tracing;
1563	case BPF_FUNC_get_branch_snapshot:
1564		return &bpf_get_branch_snapshot_proto;
1565	case BPF_FUNC_find_vma:
1566		return &bpf_find_vma_proto;
1567	case BPF_FUNC_trace_vprintk:
1568		return bpf_get_trace_vprintk_proto();
1569	default:
1570		return bpf_base_func_proto(func_id, prog);
1571	}
1572}
1573
1574static bool is_kprobe_multi(const struct bpf_prog *prog)
1575{
1576	return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
1577	       prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1578}
1579
1580static inline bool is_kprobe_session(const struct bpf_prog *prog)
1581{
1582	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1583}
1584
1585static inline bool is_uprobe_multi(const struct bpf_prog *prog)
1586{
1587	return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
1588	       prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1589}
1590
1591static inline bool is_uprobe_session(const struct bpf_prog *prog)
1592{
1593	return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1594}
1595
1596static const struct bpf_func_proto *
1597kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1598{
1599	switch (func_id) {
1600	case BPF_FUNC_perf_event_output:
1601		return &bpf_perf_event_output_proto;
1602	case BPF_FUNC_get_stackid:
1603		return &bpf_get_stackid_proto;
1604	case BPF_FUNC_get_stack:
1605		return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
1606#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1607	case BPF_FUNC_override_return:
1608		return &bpf_override_return_proto;
1609#endif
1610	case BPF_FUNC_get_func_ip:
1611		if (is_kprobe_multi(prog))
1612			return &bpf_get_func_ip_proto_kprobe_multi;
1613		if (is_uprobe_multi(prog))
1614			return &bpf_get_func_ip_proto_uprobe_multi;
1615		return &bpf_get_func_ip_proto_kprobe;
1616	case BPF_FUNC_get_attach_cookie:
1617		if (is_kprobe_multi(prog))
1618			return &bpf_get_attach_cookie_proto_kmulti;
1619		if (is_uprobe_multi(prog))
1620			return &bpf_get_attach_cookie_proto_umulti;
1621		return &bpf_get_attach_cookie_proto_trace;
1622	default:
1623		return bpf_tracing_func_proto(func_id, prog);
1624	}
1625}
1626
1627/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1628static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1629					const struct bpf_prog *prog,
1630					struct bpf_insn_access_aux *info)
1631{
1632	if (off < 0 || off >= sizeof(struct pt_regs))
1633		return false;
1634	if (type != BPF_READ)
1635		return false;
1636	if (off % size != 0)
1637		return false;
1638	/*
1639	 * Assertion for 32 bit to make sure last 8 byte access
1640	 * (BPF_DW) to the last 4 byte member is disallowed.
1641	 */
1642	if (off + size > sizeof(struct pt_regs))
1643		return false;
1644
1645	return true;
1646}
1647
1648const struct bpf_verifier_ops kprobe_verifier_ops = {
1649	.get_func_proto  = kprobe_prog_func_proto,
1650	.is_valid_access = kprobe_prog_is_valid_access,
1651};
1652
1653const struct bpf_prog_ops kprobe_prog_ops = {
1654};
1655
1656BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1657	   u64, flags, void *, data, u64, size)
1658{
1659	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1660
1661	/*
1662	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1663	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1664	 * from there and call the same bpf_perf_event_output() helper inline.
1665	 */
1666	return ____bpf_perf_event_output(regs, map, flags, data, size);
1667}
1668
1669static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1670	.func		= bpf_perf_event_output_tp,
1671	.gpl_only	= true,
1672	.ret_type	= RET_INTEGER,
1673	.arg1_type	= ARG_PTR_TO_CTX,
1674	.arg2_type	= ARG_CONST_MAP_PTR,
1675	.arg3_type	= ARG_ANYTHING,
1676	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1677	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1678};
1679
1680BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1681	   u64, flags)
1682{
1683	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1684
1685	/*
1686	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1687	 * the other helper's function body cannot be inlined due to being
1688	 * external, thus we need to call raw helper function.
1689	 */
1690	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1691			       flags, 0, 0);
1692}
1693
1694static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1695	.func		= bpf_get_stackid_tp,
1696	.gpl_only	= true,
1697	.ret_type	= RET_INTEGER,
1698	.arg1_type	= ARG_PTR_TO_CTX,
1699	.arg2_type	= ARG_CONST_MAP_PTR,
1700	.arg3_type	= ARG_ANYTHING,
1701};
1702
1703BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1704	   u64, flags)
1705{
1706	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1707
1708	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1709			     (unsigned long) size, flags, 0);
1710}
1711
1712static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1713	.func		= bpf_get_stack_tp,
1714	.gpl_only	= true,
1715	.ret_type	= RET_INTEGER,
1716	.arg1_type	= ARG_PTR_TO_CTX,
1717	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1718	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1719	.arg4_type	= ARG_ANYTHING,
1720};
1721
1722static const struct bpf_func_proto *
1723tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1724{
1725	switch (func_id) {
1726	case BPF_FUNC_perf_event_output:
1727		return &bpf_perf_event_output_proto_tp;
1728	case BPF_FUNC_get_stackid:
1729		return &bpf_get_stackid_proto_tp;
1730	case BPF_FUNC_get_stack:
1731		return &bpf_get_stack_proto_tp;
1732	case BPF_FUNC_get_attach_cookie:
1733		return &bpf_get_attach_cookie_proto_trace;
1734	default:
1735		return bpf_tracing_func_proto(func_id, prog);
1736	}
1737}
1738
1739static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1740				    const struct bpf_prog *prog,
1741				    struct bpf_insn_access_aux *info)
1742{
1743	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1744		return false;
1745	if (type != BPF_READ)
1746		return false;
1747	if (off % size != 0)
1748		return false;
1749
1750	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1751	return true;
1752}
1753
1754const struct bpf_verifier_ops tracepoint_verifier_ops = {
1755	.get_func_proto  = tp_prog_func_proto,
1756	.is_valid_access = tp_prog_is_valid_access,
1757};
1758
1759const struct bpf_prog_ops tracepoint_prog_ops = {
1760};
1761
1762BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1763	   struct bpf_perf_event_value *, buf, u32, size)
1764{
1765	int err = -EINVAL;
1766
1767	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1768		goto clear;
1769	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1770				    &buf->running);
1771	if (unlikely(err))
1772		goto clear;
1773	return 0;
1774clear:
1775	memset(buf, 0, size);
1776	return err;
1777}
1778
1779static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1780	.func		= bpf_perf_prog_read_value,
1781	.gpl_only	= true,
1782	.ret_type	= RET_INTEGER,
1783	.arg1_type	= ARG_PTR_TO_CTX,
1784	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1785	.arg3_type	= ARG_CONST_SIZE,
1786};
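/*
 * Typical use from a perf_event program (sketch; "ctx" is the program's
 * struct bpf_perf_event_data pointer):
 *
 *	struct bpf_perf_event_value value;
 *
 *	if (!bpf_perf_prog_read_value(ctx, &value, sizeof(value)))
 *		... use value.counter, value.enabled and value.running ...
 *
 * Any size other than sizeof(struct bpf_perf_event_value) fails with
 * -EINVAL and clears the buffer, as enforced above.
 */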
1787
1788BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1789	   void *, buf, u32, size, u64, flags)
1790{
1791	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1792	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1793	u32 to_copy;
1794
1795	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1796		return -EINVAL;
1797
1798	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1799		return -ENOENT;
1800
1801	if (unlikely(!br_stack))
1802		return -ENOENT;
1803
1804	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1805		return br_stack->nr * br_entry_size;
1806
1807	if (!buf || (size % br_entry_size != 0))
1808		return -EINVAL;
1809
1810	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1811	memcpy(buf, br_stack->entries, to_copy);
1812
1813	return to_copy;
1814}
1815
1816static const struct bpf_func_proto bpf_read_branch_records_proto = {
1817	.func           = bpf_read_branch_records,
1818	.gpl_only       = true,
1819	.ret_type       = RET_INTEGER,
1820	.arg1_type      = ARG_PTR_TO_CTX,
1821	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1822	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1823	.arg4_type      = ARG_ANYTHING,
1824};
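/*
 * A perf_event program typically calls this helper twice: once with
 * BPF_F_GET_BRANCH_RECORDS_SIZE to learn how many bytes of
 * struct perf_branch_entry are available, then again with a buffer whose
 * size is a multiple of the entry size. Sketch ("entries" being a
 * program-local array):
 *
 *	int sz = bpf_read_branch_records(ctx, NULL, 0,
 *					 BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (sz > 0)
 *		sz = bpf_read_branch_records(ctx, entries,
 *					     sizeof(entries), 0);
 */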
1825
1826static const struct bpf_func_proto *
1827pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1828{
1829	switch (func_id) {
1830	case BPF_FUNC_perf_event_output:
1831		return &bpf_perf_event_output_proto_tp;
1832	case BPF_FUNC_get_stackid:
1833		return &bpf_get_stackid_proto_pe;
1834	case BPF_FUNC_get_stack:
1835		return &bpf_get_stack_proto_pe;
1836	case BPF_FUNC_perf_prog_read_value:
1837		return &bpf_perf_prog_read_value_proto;
1838	case BPF_FUNC_read_branch_records:
1839		return &bpf_read_branch_records_proto;
1840	case BPF_FUNC_get_attach_cookie:
1841		return &bpf_get_attach_cookie_proto_pe;
1842	default:
1843		return bpf_tracing_func_proto(func_id, prog);
1844	}
1845}
1846
1847/*
1848 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1849 * to avoid potential recursive reuse issue when/if tracepoints are added
1850 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1851 *
1852 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1853 * in normal, irq, and nmi context.
1854 */
1855struct bpf_raw_tp_regs {
1856	struct pt_regs regs[3];
1857};
1858static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1859static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1860static struct pt_regs *get_bpf_raw_tp_regs(void)
1861{
1862	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1863	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1864
1865	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1866		this_cpu_dec(bpf_raw_tp_nest_level);
1867		return ERR_PTR(-EBUSY);
1868	}
1869
1870	return &tp_regs->regs[nest_level - 1];
1871}
1872
1873static void put_bpf_raw_tp_regs(void)
1874{
1875	this_cpu_dec(bpf_raw_tp_nest_level);
1876}
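/*
 * The nest level maps directly onto the per-CPU regs array: the first
 * user on a CPU gets regs[0], a raw tracepoint firing from irq context
 * on top of it gets regs[1], another one from nmi context gets regs[2],
 * and a fourth nested user exceeds ARRAY_SIZE(tp_regs->regs) and gets
 * -EBUSY. The callers below pair every successful get_bpf_raw_tp_regs()
 * with put_bpf_raw_tp_regs().
 */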
1877
1878BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1879	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1880{
1881	struct pt_regs *regs = get_bpf_raw_tp_regs();
1882	int ret;
1883
1884	if (IS_ERR(regs))
1885		return PTR_ERR(regs);
1886
1887	perf_fetch_caller_regs(regs);
1888	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1889
1890	put_bpf_raw_tp_regs();
1891	return ret;
1892}
1893
1894static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1895	.func		= bpf_perf_event_output_raw_tp,
1896	.gpl_only	= true,
1897	.ret_type	= RET_INTEGER,
1898	.arg1_type	= ARG_PTR_TO_CTX,
1899	.arg2_type	= ARG_CONST_MAP_PTR,
1900	.arg3_type	= ARG_ANYTHING,
1901	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1902	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1903};
1904
1905extern const struct bpf_func_proto bpf_skb_output_proto;
1906extern const struct bpf_func_proto bpf_xdp_output_proto;
1907extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1908
1909BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1910	   struct bpf_map *, map, u64, flags)
1911{
1912	struct pt_regs *regs = get_bpf_raw_tp_regs();
1913	int ret;
1914
1915	if (IS_ERR(regs))
1916		return PTR_ERR(regs);
1917
1918	perf_fetch_caller_regs(regs);
1919	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1920	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1921			      flags, 0, 0);
1922	put_bpf_raw_tp_regs();
1923	return ret;
1924}
1925
1926static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1927	.func		= bpf_get_stackid_raw_tp,
1928	.gpl_only	= true,
1929	.ret_type	= RET_INTEGER,
1930	.arg1_type	= ARG_PTR_TO_CTX,
1931	.arg2_type	= ARG_CONST_MAP_PTR,
1932	.arg3_type	= ARG_ANYTHING,
1933};
1934
1935BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1936	   void *, buf, u32, size, u64, flags)
1937{
1938	struct pt_regs *regs = get_bpf_raw_tp_regs();
1939	int ret;
1940
1941	if (IS_ERR(regs))
1942		return PTR_ERR(regs);
1943
1944	perf_fetch_caller_regs(regs);
1945	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1946			    (unsigned long) size, flags, 0);
1947	put_bpf_raw_tp_regs();
1948	return ret;
1949}
1950
1951static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1952	.func		= bpf_get_stack_raw_tp,
1953	.gpl_only	= true,
1954	.ret_type	= RET_INTEGER,
1955	.arg1_type	= ARG_PTR_TO_CTX,
1956	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1957	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1958	.arg4_type	= ARG_ANYTHING,
1959};
1960
1961static const struct bpf_func_proto *
1962raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1963{
1964	switch (func_id) {
1965	case BPF_FUNC_perf_event_output:
1966		return &bpf_perf_event_output_proto_raw_tp;
1967	case BPF_FUNC_get_stackid:
1968		return &bpf_get_stackid_proto_raw_tp;
1969	case BPF_FUNC_get_stack:
1970		return &bpf_get_stack_proto_raw_tp;
1971	case BPF_FUNC_get_attach_cookie:
1972		return &bpf_get_attach_cookie_proto_tracing;
1973	default:
1974		return bpf_tracing_func_proto(func_id, prog);
1975	}
1976}
1977
1978const struct bpf_func_proto *
1979tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1980{
1981	const struct bpf_func_proto *fn;
1982
1983	switch (func_id) {
1984#ifdef CONFIG_NET
1985	case BPF_FUNC_skb_output:
1986		return &bpf_skb_output_proto;
1987	case BPF_FUNC_xdp_output:
1988		return &bpf_xdp_output_proto;
1989	case BPF_FUNC_skc_to_tcp6_sock:
1990		return &bpf_skc_to_tcp6_sock_proto;
1991	case BPF_FUNC_skc_to_tcp_sock:
1992		return &bpf_skc_to_tcp_sock_proto;
1993	case BPF_FUNC_skc_to_tcp_timewait_sock:
1994		return &bpf_skc_to_tcp_timewait_sock_proto;
1995	case BPF_FUNC_skc_to_tcp_request_sock:
1996		return &bpf_skc_to_tcp_request_sock_proto;
1997	case BPF_FUNC_skc_to_udp6_sock:
1998		return &bpf_skc_to_udp6_sock_proto;
1999	case BPF_FUNC_skc_to_unix_sock:
2000		return &bpf_skc_to_unix_sock_proto;
2001	case BPF_FUNC_skc_to_mptcp_sock:
2002		return &bpf_skc_to_mptcp_sock_proto;
2003	case BPF_FUNC_sk_storage_get:
2004		return &bpf_sk_storage_get_tracing_proto;
2005	case BPF_FUNC_sk_storage_delete:
2006		return &bpf_sk_storage_delete_tracing_proto;
2007	case BPF_FUNC_sock_from_file:
2008		return &bpf_sock_from_file_proto;
2009	case BPF_FUNC_get_socket_cookie:
2010		return &bpf_get_socket_ptr_cookie_proto;
2011	case BPF_FUNC_xdp_get_buff_len:
2012		return &bpf_xdp_get_buff_len_trace_proto;
2013#endif
2014	case BPF_FUNC_seq_printf:
2015		return prog->expected_attach_type == BPF_TRACE_ITER ?
2016		       &bpf_seq_printf_proto :
2017		       NULL;
2018	case BPF_FUNC_seq_write:
2019		return prog->expected_attach_type == BPF_TRACE_ITER ?
2020		       &bpf_seq_write_proto :
2021		       NULL;
2022	case BPF_FUNC_seq_printf_btf:
2023		return prog->expected_attach_type == BPF_TRACE_ITER ?
2024		       &bpf_seq_printf_btf_proto :
2025		       NULL;
2026	case BPF_FUNC_d_path:
2027		return &bpf_d_path_proto;
2028	case BPF_FUNC_get_func_arg:
2029		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
2030	case BPF_FUNC_get_func_ret:
2031		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2032	case BPF_FUNC_get_func_arg_cnt:
2033		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2034	case BPF_FUNC_get_attach_cookie:
2035		if (prog->type == BPF_PROG_TYPE_TRACING &&
2036		    prog->expected_attach_type == BPF_TRACE_RAW_TP)
2037			return &bpf_get_attach_cookie_proto_tracing;
2038		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2039	default:
2040		fn = raw_tp_prog_func_proto(func_id, prog);
2041		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2042			fn = bpf_iter_get_func_proto(func_id, prog);
2043		return fn;
2044	}
2045}
2046
2047static bool raw_tp_prog_is_valid_access(int off, int size,
2048					enum bpf_access_type type,
2049					const struct bpf_prog *prog,
2050					struct bpf_insn_access_aux *info)
2051{
2052	return bpf_tracing_ctx_access(off, size, type);
2053}
2054
2055static bool tracing_prog_is_valid_access(int off, int size,
2056					 enum bpf_access_type type,
2057					 const struct bpf_prog *prog,
2058					 struct bpf_insn_access_aux *info)
2059{
2060	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2061}
2062
2063int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2064				     const union bpf_attr *kattr,
2065				     union bpf_attr __user *uattr)
2066{
2067	return -ENOTSUPP;
2068}
2069
2070const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2071	.get_func_proto  = raw_tp_prog_func_proto,
2072	.is_valid_access = raw_tp_prog_is_valid_access,
2073};
2074
2075const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2076#ifdef CONFIG_NET
2077	.test_run = bpf_prog_test_run_raw_tp,
2078#endif
2079};
2080
2081const struct bpf_verifier_ops tracing_verifier_ops = {
2082	.get_func_proto  = tracing_prog_func_proto,
2083	.is_valid_access = tracing_prog_is_valid_access,
2084};
2085
2086const struct bpf_prog_ops tracing_prog_ops = {
2087	.test_run = bpf_prog_test_run_tracing,
2088};
2089
2090static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2091						 enum bpf_access_type type,
2092						 const struct bpf_prog *prog,
2093						 struct bpf_insn_access_aux *info)
2094{
2095	if (off == 0) {
2096		if (size != sizeof(u64) || type != BPF_READ)
2097			return false;
2098		info->reg_type = PTR_TO_TP_BUFFER;
2099	}
2100	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2101}
2102
2103const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2104	.get_func_proto  = raw_tp_prog_func_proto,
2105	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2106};
2107
2108const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2109};
2110
2111static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2112				    const struct bpf_prog *prog,
2113				    struct bpf_insn_access_aux *info)
2114{
2115	const int size_u64 = sizeof(u64);
2116
2117	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2118		return false;
2119	if (type != BPF_READ)
2120		return false;
2121	if (off % size != 0) {
2122		if (sizeof(unsigned long) != 4)
2123			return false;
2124		if (size != 8)
2125			return false;
2126		if (off % size != 4)
2127			return false;
2128	}
2129
2130	switch (off) {
2131	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2132		bpf_ctx_record_field_size(info, size_u64);
2133		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2134			return false;
2135		break;
2136	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2137		bpf_ctx_record_field_size(info, size_u64);
2138		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2139			return false;
2140		break;
2141	default:
2142		if (size != sizeof(long))
2143			return false;
2144	}
2145
2146	return true;
2147}
2148
2149static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2150				      const struct bpf_insn *si,
2151				      struct bpf_insn *insn_buf,
2152				      struct bpf_prog *prog, u32 *target_size)
2153{
2154	struct bpf_insn *insn = insn_buf;
2155
2156	switch (si->off) {
2157	case offsetof(struct bpf_perf_event_data, sample_period):
2158		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2159						       data), si->dst_reg, si->src_reg,
2160				      offsetof(struct bpf_perf_event_data_kern, data));
2161		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2162				      bpf_target_off(struct perf_sample_data, period, 8,
2163						     target_size));
2164		break;
2165	case offsetof(struct bpf_perf_event_data, addr):
2166		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2167						       data), si->dst_reg, si->src_reg,
2168				      offsetof(struct bpf_perf_event_data_kern, data));
2169		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2170				      bpf_target_off(struct perf_sample_data, addr, 8,
2171						     target_size));
2172		break;
2173	default:
2174		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2175						       regs), si->dst_reg, si->src_reg,
2176				      offsetof(struct bpf_perf_event_data_kern, regs));
2177		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2178				      si->off);
2179		break;
2180	}
2181
2182	return insn - insn_buf;
2183}
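/*
 * In other words, a program-side load such as
 *
 *	__u64 period = ctx->sample_period;
 *
 * is rewritten into the two loads emitted above (fetch ctx->data, then
 * data->period), and likewise for ctx->addr, while a load from any other
 * offset fetches ctx->regs and then reads at the original offset within
 * the saved register set.
 */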
2184
2185const struct bpf_verifier_ops perf_event_verifier_ops = {
2186	.get_func_proto		= pe_prog_func_proto,
2187	.is_valid_access	= pe_prog_is_valid_access,
2188	.convert_ctx_access	= pe_prog_convert_ctx_access,
2189};
2190
2191const struct bpf_prog_ops perf_event_prog_ops = {
2192};
2193
2194static DEFINE_MUTEX(bpf_event_mutex);
2195
2196#define BPF_TRACE_MAX_PROGS 64
2197
2198int perf_event_attach_bpf_prog(struct perf_event *event,
2199			       struct bpf_prog *prog,
2200			       u64 bpf_cookie)
2201{
2202	struct bpf_prog_array *old_array;
2203	struct bpf_prog_array *new_array;
2204	int ret = -EEXIST;
2205
2206	/*
2207	 * Kprobe override only works if the kprobe is on the function entry,
2208	 * and only if the function is on the error-injection opt-in list.
2209	 */
2210	if (prog->kprobe_override &&
2211	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2212	     !trace_kprobe_error_injectable(event->tp_event)))
2213		return -EINVAL;
2214
2215	mutex_lock(&bpf_event_mutex);
2216
2217	if (event->prog)
2218		goto unlock;
2219
2220	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2221	if (old_array &&
2222	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2223		ret = -E2BIG;
2224		goto unlock;
2225	}
2226
2227	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2228	if (ret < 0)
2229		goto unlock;
2230
2231	/* set the new array to event->tp_event and set event->prog */
2232	event->prog = prog;
2233	event->bpf_cookie = bpf_cookie;
2234	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2235	bpf_prog_array_free_sleepable(old_array);
2236
2237unlock:
2238	mutex_unlock(&bpf_event_mutex);
2239	return ret;
2240}
2241
2242void perf_event_detach_bpf_prog(struct perf_event *event)
2243{
2244	struct bpf_prog_array *old_array;
2245	struct bpf_prog_array *new_array;
2246	int ret;
2247
2248	mutex_lock(&bpf_event_mutex);
2249
2250	if (!event->prog)
2251		goto unlock;
2252
2253	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2254	if (!old_array)
2255		goto put;
2256
2257	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2258	if (ret < 0) {
2259		bpf_prog_array_delete_safe(old_array, event->prog);
2260	} else {
2261		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2262		bpf_prog_array_free_sleepable(old_array);
2263	}
2264
2265put:
2266	/*
2267	 * It could be that the bpf_prog is not sleepable (and will be freed
2268	 * via normal RCU), but is called from a point that supports sleepable
2269	 * programs and uses tasks-trace-RCU.
2270	 */
2271	synchronize_rcu_tasks_trace();
2272
2273	bpf_prog_put(event->prog);
2274	event->prog = NULL;
2275
2276unlock:
2277	mutex_unlock(&bpf_event_mutex);
2278}
2279
2280int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2281{
2282	struct perf_event_query_bpf __user *uquery = info;
2283	struct perf_event_query_bpf query = {};
2284	struct bpf_prog_array *progs;
2285	u32 *ids, prog_cnt, ids_len;
2286	int ret;
2287
2288	if (!perfmon_capable())
2289		return -EPERM;
2290	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2291		return -EINVAL;
2292	if (copy_from_user(&query, uquery, sizeof(query)))
2293		return -EFAULT;
2294
2295	ids_len = query.ids_len;
2296	if (ids_len > BPF_TRACE_MAX_PROGS)
2297		return -E2BIG;
2298	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2299	if (!ids)
2300		return -ENOMEM;
2301	/*
2302	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2303	 * is required when the user only wants to check for uquery->prog_cnt.
2304	 * There is no need to check for it since the case is handled
2305	 * gracefully in bpf_prog_array_copy_info.
2306	 */
2307
2308	mutex_lock(&bpf_event_mutex);
2309	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2310	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2311	mutex_unlock(&bpf_event_mutex);
2312
2313	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2314	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2315		ret = -EFAULT;
2316
2317	kfree(ids);
2318	return ret;
2319}
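/*
 * This is reached via the PERF_EVENT_IOC_QUERY_BPF perf ioctl. A rough
 * user-space sketch (error handling omitted; "perf_fd" is the caller's
 * perf event fd):
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *	query->ids_len = 64;
 *	ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query);
 *
 * after which query->prog_cnt and query->ids[] describe the attached
 * programs.
 */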
2320
2321extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2322extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2323
2324struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2325{
2326	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2327
2328	for (; btp < __stop__bpf_raw_tp; btp++) {
2329		if (!strcmp(btp->tp->name, name))
2330			return btp;
2331	}
2332
2333	return bpf_get_raw_tracepoint_module(name);
2334}
2335
2336void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2337{
2338	struct module *mod;
2339
2340	preempt_disable();
2341	mod = __module_address((unsigned long)btp);
2342	module_put(mod);
2343	preempt_enable();
2344}
2345
2346static __always_inline
2347void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
2348{
2349	struct bpf_prog *prog = link->link.prog;
2350	struct bpf_run_ctx *old_run_ctx;
2351	struct bpf_trace_run_ctx run_ctx;
2352
2353	cant_sleep();
2354	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2355		bpf_prog_inc_misses_counter(prog);
2356		goto out;
2357	}
2358
2359	run_ctx.bpf_cookie = link->cookie;
2360	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2361
2362	rcu_read_lock();
2363	(void) bpf_prog_run(prog, args);
2364	rcu_read_unlock();
2365
2366	bpf_reset_run_ctx(old_run_ctx);
2367out:
2368	this_cpu_dec(*(prog->active));
2369}
2370
2371#define UNPACK(...)			__VA_ARGS__
2372#define REPEAT_1(FN, DL, X, ...)	FN(X)
2373#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2374#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2375#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2376#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2377#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2378#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2379#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2380#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2381#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2382#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2383#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2384#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2385
2386#define SARG(X)		u64 arg##X
2387#define COPY(X)		args[X] = arg##X
2388
2389#define __DL_COM	(,)
2390#define __DL_SEM	(;)
2391
2392#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2393
2394#define BPF_TRACE_DEFN_x(x)						\
2395	void bpf_trace_run##x(struct bpf_raw_tp_link *link,		\
2396			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2397	{								\
2398		u64 args[x];						\
2399		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2400		__bpf_trace_run(link, args);				\
2401	}								\
2402	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2403BPF_TRACE_DEFN_x(1);
2404BPF_TRACE_DEFN_x(2);
2405BPF_TRACE_DEFN_x(3);
2406BPF_TRACE_DEFN_x(4);
2407BPF_TRACE_DEFN_x(5);
2408BPF_TRACE_DEFN_x(6);
2409BPF_TRACE_DEFN_x(7);
2410BPF_TRACE_DEFN_x(8);
2411BPF_TRACE_DEFN_x(9);
2412BPF_TRACE_DEFN_x(10);
2413BPF_TRACE_DEFN_x(11);
2414BPF_TRACE_DEFN_x(12);
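/*
 * For reference, BPF_TRACE_DEFN_x(2) expands (modulo whitespace) to:
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * i.e. each bpf_trace_runN() spills its N tracepoint arguments into a
 * u64 array and hands them to __bpf_trace_run() above.
 */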
2415
2416int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2417{
2418	struct tracepoint *tp = btp->tp;
2419	struct bpf_prog *prog = link->link.prog;
2420
2421	/*
2422	 * check that the program doesn't access arguments beyond what's
2423	 * available in this tracepoint
2424	 */
2425	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2426		return -EINVAL;
2427
2428	if (prog->aux->max_tp_access > btp->writable_size)
2429		return -EINVAL;
2430
2431	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2432}
2433
2434int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2435{
2436	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2437}
2438
2439int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2440			    u32 *fd_type, const char **buf,
2441			    u64 *probe_offset, u64 *probe_addr,
2442			    unsigned long *missed)
2443{
2444	bool is_tracepoint, is_syscall_tp;
2445	struct bpf_prog *prog;
2446	int flags, err = 0;
2447
2448	prog = event->prog;
2449	if (!prog)
2450		return -ENOENT;
2451
2452	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2453	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2454		return -EOPNOTSUPP;
2455
2456	*prog_id = prog->aux->id;
2457	flags = event->tp_event->flags;
2458	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2459	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2460
2461	if (is_tracepoint || is_syscall_tp) {
2462		*buf = is_tracepoint ? event->tp_event->tp->name
2463				     : event->tp_event->name;
2464		/* We allow NULL pointer for tracepoint */
2465		if (fd_type)
2466			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2467		if (probe_offset)
2468			*probe_offset = 0x0;
2469		if (probe_addr)
2470			*probe_addr = 0x0;
2471	} else {
2472		/* kprobe/uprobe */
2473		err = -EOPNOTSUPP;
2474#ifdef CONFIG_KPROBE_EVENTS
2475		if (flags & TRACE_EVENT_FL_KPROBE)
2476			err = bpf_get_kprobe_info(event, fd_type, buf,
2477						  probe_offset, probe_addr, missed,
2478						  event->attr.type == PERF_TYPE_TRACEPOINT);
2479#endif
2480#ifdef CONFIG_UPROBE_EVENTS
2481		if (flags & TRACE_EVENT_FL_UPROBE)
2482			err = bpf_get_uprobe_info(event, fd_type, buf,
2483						  probe_offset, probe_addr,
2484						  event->attr.type == PERF_TYPE_TRACEPOINT);
2485#endif
2486	}
2487
2488	return err;
2489}
2490
2491static int __init send_signal_irq_work_init(void)
2492{
2493	int cpu;
2494	struct send_signal_irq_work *work;
2495
2496	for_each_possible_cpu(cpu) {
2497		work = per_cpu_ptr(&send_signal_work, cpu);
2498		init_irq_work(&work->irq_work, do_bpf_send_signal);
2499	}
2500	return 0;
2501}
2502
2503subsys_initcall(send_signal_irq_work_init);
2504
2505#ifdef CONFIG_MODULES
2506static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2507			    void *module)
2508{
2509	struct bpf_trace_module *btm, *tmp;
2510	struct module *mod = module;
2511	int ret = 0;
2512
2513	if (mod->num_bpf_raw_events == 0 ||
2514	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2515		goto out;
2516
2517	mutex_lock(&bpf_module_mutex);
2518
2519	switch (op) {
2520	case MODULE_STATE_COMING:
2521		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2522		if (btm) {
2523			btm->module = module;
2524			list_add(&btm->list, &bpf_trace_modules);
2525		} else {
2526			ret = -ENOMEM;
2527		}
2528		break;
2529	case MODULE_STATE_GOING:
2530		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2531			if (btm->module == module) {
2532				list_del(&btm->list);
2533				kfree(btm);
2534				break;
2535			}
2536		}
2537		break;
2538	}
2539
2540	mutex_unlock(&bpf_module_mutex);
2541
2542out:
2543	return notifier_from_errno(ret);
2544}
2545
2546static struct notifier_block bpf_module_nb = {
2547	.notifier_call = bpf_event_notify,
2548};
2549
2550static int __init bpf_event_init(void)
2551{
2552	register_module_notifier(&bpf_module_nb);
2553	return 0;
2554}
2555
2556fs_initcall(bpf_event_init);
2557#endif /* CONFIG_MODULES */
2558
2559struct bpf_session_run_ctx {
2560	struct bpf_run_ctx run_ctx;
2561	bool is_return;
2562	void *data;
2563};
2564
2565#ifdef CONFIG_FPROBE
2566struct bpf_kprobe_multi_link {
2567	struct bpf_link link;
2568	struct fprobe fp;
2569	unsigned long *addrs;
2570	u64 *cookies;
2571	u32 cnt;
2572	u32 mods_cnt;
2573	struct module **mods;
2574	u32 flags;
2575};
2576
2577struct bpf_kprobe_multi_run_ctx {
2578	struct bpf_session_run_ctx session_ctx;
2579	struct bpf_kprobe_multi_link *link;
2580	unsigned long entry_ip;
2581};
2582
2583struct user_syms {
2584	const char **syms;
2585	char *buf;
2586};
2587
2588static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2589{
2590	unsigned long __user usymbol;
2591	const char **syms = NULL;
2592	char *buf = NULL, *p;
2593	int err = -ENOMEM;
2594	unsigned int i;
2595
2596	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2597	if (!syms)
2598		goto error;
2599
2600	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2601	if (!buf)
2602		goto error;
2603
2604	for (p = buf, i = 0; i < cnt; i++) {
2605		if (__get_user(usymbol, usyms + i)) {
2606			err = -EFAULT;
2607			goto error;
2608		}
2609		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2610		if (err == KSYM_NAME_LEN)
2611			err = -E2BIG;
2612		if (err < 0)
2613			goto error;
2614		syms[i] = p;
2615		p += err + 1;
2616	}
2617
2618	us->syms = syms;
2619	us->buf = buf;
2620	return 0;
2621
2622error:
2623	if (err) {
2624		kvfree(syms);
2625		kvfree(buf);
2626	}
2627	return err;
2628}
2629
2630static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2631{
2632	u32 i;
2633
2634	for (i = 0; i < cnt; i++)
2635		module_put(mods[i]);
2636}
2637
2638static void free_user_syms(struct user_syms *us)
2639{
2640	kvfree(us->syms);
2641	kvfree(us->buf);
2642}
2643
2644static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2645{
2646	struct bpf_kprobe_multi_link *kmulti_link;
2647
2648	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2649	unregister_fprobe(&kmulti_link->fp);
2650	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2651}
2652
2653static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2654{
2655	struct bpf_kprobe_multi_link *kmulti_link;
2656
2657	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2658	kvfree(kmulti_link->addrs);
2659	kvfree(kmulti_link->cookies);
2660	kfree(kmulti_link->mods);
2661	kfree(kmulti_link);
2662}
2663
2664static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2665						struct bpf_link_info *info)
2666{
2667	u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2668	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2669	struct bpf_kprobe_multi_link *kmulti_link;
2670	u32 ucount = info->kprobe_multi.count;
2671	int err = 0, i;
2672
2673	if (!uaddrs ^ !ucount)
2674		return -EINVAL;
2675	if (ucookies && !ucount)
2676		return -EINVAL;
2677
2678	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2679	info->kprobe_multi.count = kmulti_link->cnt;
2680	info->kprobe_multi.flags = kmulti_link->flags;
2681	info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2682
2683	if (!uaddrs)
2684		return 0;
2685	if (ucount < kmulti_link->cnt)
2686		err = -ENOSPC;
2687	else
2688		ucount = kmulti_link->cnt;
2689
2690	if (ucookies) {
2691		if (kmulti_link->cookies) {
2692			if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2693				return -EFAULT;
2694		} else {
2695			for (i = 0; i < ucount; i++) {
2696				if (put_user(0, ucookies + i))
2697					return -EFAULT;
2698			}
2699		}
2700	}
2701
2702	if (kallsyms_show_value(current_cred())) {
2703		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2704			return -EFAULT;
2705	} else {
2706		for (i = 0; i < ucount; i++) {
2707			if (put_user(0, uaddrs + i))
2708				return -EFAULT;
2709		}
2710	}
2711	return err;
2712}
2713
2714static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2715	.release = bpf_kprobe_multi_link_release,
2716	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2717	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2718};
2719
2720static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2721{
2722	const struct bpf_kprobe_multi_link *link = priv;
2723	unsigned long *addr_a = a, *addr_b = b;
2724	u64 *cookie_a, *cookie_b;
2725
2726	cookie_a = link->cookies + (addr_a - link->addrs);
2727	cookie_b = link->cookies + (addr_b - link->addrs);
2728
2729	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2730	swap(*addr_a, *addr_b);
2731	swap(*cookie_a, *cookie_b);
2732}
2733
2734static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2735{
2736	const unsigned long *addr_a = a, *addr_b = b;
2737
2738	if (*addr_a == *addr_b)
2739		return 0;
2740	return *addr_a < *addr_b ? -1 : 1;
2741}
2742
2743static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2744{
2745	return bpf_kprobe_multi_addrs_cmp(a, b);
2746}
2747
2748static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2749{
2750	struct bpf_kprobe_multi_run_ctx *run_ctx;
2751	struct bpf_kprobe_multi_link *link;
2752	u64 *cookie, entry_ip;
2753	unsigned long *addr;
2754
2755	if (WARN_ON_ONCE(!ctx))
2756		return 0;
2757	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2758			       session_ctx.run_ctx);
2759	link = run_ctx->link;
2760	if (!link->cookies)
2761		return 0;
2762	entry_ip = run_ctx->entry_ip;
2763	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2764		       bpf_kprobe_multi_addrs_cmp);
2765	if (!addr)
2766		return 0;
2767	cookie = link->cookies + (addr - link->addrs);
2768	return *cookie;
2769}
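/*
 * Example with made-up values: if link->addrs was sorted to
 * {0x1000, 0x2000, 0x3000} and link->cookies co-sorted to {7, 8, 9}
 * (see the sort_r() call using bpf_kprobe_multi_cookie_swap() in
 * bpf_kprobe_multi_link_attach() below), an entry_ip of 0x2000 bsearches
 * to index 1 and the helper returns cookie 8.
 */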
2770
2771static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2772{
2773	struct bpf_kprobe_multi_run_ctx *run_ctx;
2774
2775	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2776			       session_ctx.run_ctx);
2777	return run_ctx->entry_ip;
2778}
2779
2780static int
2781kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2782			   unsigned long entry_ip, struct pt_regs *regs,
2783			   bool is_return, void *data)
2784{
2785	struct bpf_kprobe_multi_run_ctx run_ctx = {
2786		.session_ctx = {
2787			.is_return = is_return,
2788			.data = data,
2789		},
2790		.link = link,
2791		.entry_ip = entry_ip,
2792	};
2793	struct bpf_run_ctx *old_run_ctx;
2794	int err;
2795
2796	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2797		bpf_prog_inc_misses_counter(link->link.prog);
2798		err = 0;
2799		goto out;
2800	}
2801
2802	migrate_disable();
2803	rcu_read_lock();
2804	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
2805	err = bpf_prog_run(link->link.prog, regs);
2806	bpf_reset_run_ctx(old_run_ctx);
2807	rcu_read_unlock();
2808	migrate_enable();
2809
2810 out:
2811	__this_cpu_dec(bpf_prog_active);
2812	return err;
2813}
2814
2815static int
2816kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2817			  unsigned long ret_ip, struct pt_regs *regs,
2818			  void *data)
2819{
2820	struct bpf_kprobe_multi_link *link;
2821	int err;
2822
2823	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2824	err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data);
2825	return is_kprobe_session(link->link.prog) ? err : 0;
2826}
2827
2828static void
2829kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2830			       unsigned long ret_ip, struct pt_regs *regs,
2831			       void *data)
2832{
2833	struct bpf_kprobe_multi_link *link;
2834
2835	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2836	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data);
2837}
2838
2839static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2840{
2841	const char **str_a = (const char **) a;
2842	const char **str_b = (const char **) b;
2843
2844	return strcmp(*str_a, *str_b);
2845}
2846
2847struct multi_symbols_sort {
2848	const char **funcs;
2849	u64 *cookies;
2850};
2851
2852static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2853{
2854	const struct multi_symbols_sort *data = priv;
2855	const char **name_a = a, **name_b = b;
2856
2857	swap(*name_a, *name_b);
2858
2859	/* If defined, also swap the related cookies. */
2860	if (data->cookies) {
2861		u64 *cookie_a, *cookie_b;
2862
2863		cookie_a = data->cookies + (name_a - data->funcs);
2864		cookie_b = data->cookies + (name_b - data->funcs);
2865		swap(*cookie_a, *cookie_b);
2866	}
2867}
2868
2869struct modules_array {
2870	struct module **mods;
2871	int mods_cnt;
2872	int mods_cap;
2873};
2874
2875static int add_module(struct modules_array *arr, struct module *mod)
2876{
2877	struct module **mods;
2878
2879	if (arr->mods_cnt == arr->mods_cap) {
2880		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2881		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2882		if (!mods)
2883			return -ENOMEM;
2884		arr->mods = mods;
2885	}
2886
2887	arr->mods[arr->mods_cnt] = mod;
2888	arr->mods_cnt++;
2889	return 0;
2890}
2891
2892static bool has_module(struct modules_array *arr, struct module *mod)
2893{
2894	int i;
2895
2896	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2897		if (arr->mods[i] == mod)
2898			return true;
2899	}
2900	return false;
2901}
2902
2903static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2904{
2905	struct modules_array arr = {};
2906	u32 i, err = 0;
2907
2908	for (i = 0; i < addrs_cnt; i++) {
2909		struct module *mod;
2910
2911		preempt_disable();
2912		mod = __module_address(addrs[i]);
2913		/* Either there is no module or it's already stored */
2914		if (!mod || has_module(&arr, mod)) {
2915			preempt_enable();
2916			continue;
2917		}
2918		if (!try_module_get(mod))
2919			err = -EINVAL;
2920		preempt_enable();
2921		if (err)
2922			break;
2923		err = add_module(&arr, mod);
2924		if (err) {
2925			module_put(mod);
2926			break;
2927		}
2928	}
2929
2930	/* We return either err < 0 in case of error, ... */
2931	if (err) {
2932		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2933		kfree(arr.mods);
2934		return err;
2935	}
2936
2937	/* or number of modules found if everything is ok. */
2938	*mods = arr.mods;
2939	return arr.mods_cnt;
2940}
2941
2942static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2943{
2944	u32 i;
2945
2946	for (i = 0; i < cnt; i++) {
2947		if (!within_error_injection_list(addrs[i]))
2948			return -EINVAL;
2949	}
2950	return 0;
2951}
2952
2953int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2954{
2955	struct bpf_kprobe_multi_link *link = NULL;
2956	struct bpf_link_primer link_primer;
2957	void __user *ucookies;
2958	unsigned long *addrs;
2959	u32 flags, cnt, size;
2960	void __user *uaddrs;
2961	u64 *cookies = NULL;
2962	void __user *usyms;
2963	int err;
2964
2965	/* no support for 32bit archs yet */
2966	if (sizeof(u64) != sizeof(void *))
2967		return -EOPNOTSUPP;
2968
2969	if (!is_kprobe_multi(prog))
2970		return -EINVAL;
2971
2972	flags = attr->link_create.kprobe_multi.flags;
2973	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2974		return -EINVAL;
2975
2976	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2977	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2978	if (!!uaddrs == !!usyms)
2979		return -EINVAL;
2980
2981	cnt = attr->link_create.kprobe_multi.cnt;
2982	if (!cnt)
2983		return -EINVAL;
2984	if (cnt > MAX_KPROBE_MULTI_CNT)
2985		return -E2BIG;
2986
2987	size = cnt * sizeof(*addrs);
2988	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2989	if (!addrs)
2990		return -ENOMEM;
2991
2992	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2993	if (ucookies) {
2994		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2995		if (!cookies) {
2996			err = -ENOMEM;
2997			goto error;
2998		}
2999		if (copy_from_user(cookies, ucookies, size)) {
3000			err = -EFAULT;
3001			goto error;
3002		}
3003	}
3004
3005	if (uaddrs) {
3006		if (copy_from_user(addrs, uaddrs, size)) {
3007			err = -EFAULT;
3008			goto error;
3009		}
3010	} else {
3011		struct multi_symbols_sort data = {
3012			.cookies = cookies,
3013		};
3014		struct user_syms us;
3015
3016		err = copy_user_syms(&us, usyms, cnt);
3017		if (err)
3018			goto error;
3019
3020		if (cookies)
3021			data.funcs = us.syms;
3022
3023		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
3024		       symbols_swap_r, &data);
3025
3026		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
3027		free_user_syms(&us);
3028		if (err)
3029			goto error;
3030	}
3031
3032	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
3033		err = -EINVAL;
3034		goto error;
3035	}
3036
3037	link = kzalloc(sizeof(*link), GFP_KERNEL);
3038	if (!link) {
3039		err = -ENOMEM;
3040		goto error;
3041	}
3042
3043	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
3044		      &bpf_kprobe_multi_link_lops, prog);
3045
3046	err = bpf_link_prime(&link->link, &link_primer);
3047	if (err)
3048		goto error;
3049
3050	if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
3051		link->fp.entry_handler = kprobe_multi_link_handler;
3052	if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
3053		link->fp.exit_handler = kprobe_multi_link_exit_handler;
3054	if (is_kprobe_session(prog))
3055		link->fp.entry_data_size = sizeof(u64);
3056
3057	link->addrs = addrs;
3058	link->cookies = cookies;
3059	link->cnt = cnt;
3060	link->flags = flags;
3061
3062	if (cookies) {
3063		/*
3064		 * Sorting addresses will trigger sorting cookies as well
3065		 * (check bpf_kprobe_multi_cookie_swap). This way we can
3066		 * find the cookie based on the address in the
3067		 * bpf_get_attach_cookie helper.
3068		 */
3069		sort_r(addrs, cnt, sizeof(*addrs),
3070		       bpf_kprobe_multi_cookie_cmp,
3071		       bpf_kprobe_multi_cookie_swap,
3072		       link);
3073	}
3074
3075	err = get_modules_for_addrs(&link->mods, addrs, cnt);
3076	if (err < 0) {
3077		bpf_link_cleanup(&link_primer);
3078		return err;
3079	}
3080	link->mods_cnt = err;
3081
3082	err = register_fprobe_ips(&link->fp, addrs, cnt);
3083	if (err) {
3084		kprobe_multi_put_modules(link->mods, link->mods_cnt);
3085		bpf_link_cleanup(&link_primer);
3086		return err;
3087	}
3088
3089	return bpf_link_settle(&link_primer);
3090
3091error:
3092	kfree(link);
3093	kvfree(addrs);
3094	kvfree(cookies);
3095	return err;
3096}
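/*
 * User space reaches this through the BPF_LINK_CREATE command. A minimal
 * sketch of the attr fields consumed above ("prog_fd", "syms" and "cnt"
 * come from the caller; flags may additionally carry
 * BPF_F_KPROBE_MULTI_RETURN):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_KPROBE_MULTI;
 *	attr.link_create.kprobe_multi.syms = (__u64)(unsigned long)syms;
 *	attr.link_create.kprobe_multi.cnt = cnt;
 *	attr.link_create.kprobe_multi.flags = 0;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 * Exactly one of kprobe_multi.syms and kprobe_multi.addrs must be set,
 * as checked above.
 */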
3097#else /* !CONFIG_FPROBE */
3098int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3099{
3100	return -EOPNOTSUPP;
3101}
3102static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3103{
3104	return 0;
3105}
3106static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3107{
3108	return 0;
3109}
3110#endif
3111
3112#ifdef CONFIG_UPROBES
3113struct bpf_uprobe_multi_link;
3114
3115struct bpf_uprobe {
3116	struct bpf_uprobe_multi_link *link;
3117	loff_t offset;
3118	unsigned long ref_ctr_offset;
3119	u64 cookie;
3120	struct uprobe *uprobe;
3121	struct uprobe_consumer consumer;
3122	bool session;
3123};
3124
3125struct bpf_uprobe_multi_link {
3126	struct path path;
3127	struct bpf_link link;
3128	u32 cnt;
3129	u32 flags;
3130	struct bpf_uprobe *uprobes;
3131	struct task_struct *task;
3132};
3133
3134struct bpf_uprobe_multi_run_ctx {
3135	struct bpf_session_run_ctx session_ctx;
3136	unsigned long entry_ip;
3137	struct bpf_uprobe *uprobe;
3138};
3139
3140static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
3141{
3142	u32 i;
3143
3144	for (i = 0; i < cnt; i++)
3145		uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
3146
3147	if (cnt)
3148		uprobe_unregister_sync();
3149}
3150
3151static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3152{
3153	struct bpf_uprobe_multi_link *umulti_link;
3154
3155	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3156	bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt);
3157	if (umulti_link->task)
3158		put_task_struct(umulti_link->task);
3159	path_put(&umulti_link->path);
3160}
3161
3162static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3163{
3164	struct bpf_uprobe_multi_link *umulti_link;
3165
3166	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3167	kvfree(umulti_link->uprobes);
3168	kfree(umulti_link);
3169}
3170
3171static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3172						struct bpf_link_info *info)
3173{
3174	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3175	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3176	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3177	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3178	u32 upath_size = info->uprobe_multi.path_size;
3179	struct bpf_uprobe_multi_link *umulti_link;
3180	u32 ucount = info->uprobe_multi.count;
3181	int err = 0, i;
3182	char *p, *buf;
3183	long left = 0;
3184
3185	if (!upath ^ !upath_size)
3186		return -EINVAL;
3187
3188	if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3189		return -EINVAL;
3190
3191	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3192	info->uprobe_multi.count = umulti_link->cnt;
3193	info->uprobe_multi.flags = umulti_link->flags;
3194	info->uprobe_multi.pid = umulti_link->task ?
3195				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3196
3197	upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX;
3198	buf = kmalloc(upath_size, GFP_KERNEL);
3199	if (!buf)
3200		return -ENOMEM;
3201	p = d_path(&umulti_link->path, buf, upath_size);
3202	if (IS_ERR(p)) {
3203		kfree(buf);
3204		return PTR_ERR(p);
3205	}
3206	upath_size = buf + upath_size - p;
3207
3208	if (upath)
3209		left = copy_to_user(upath, p, upath_size);
3210	kfree(buf);
3211	if (left)
3212		return -EFAULT;
3213	info->uprobe_multi.path_size = upath_size;
3214
3215	if (!uoffsets && !ucookies && !uref_ctr_offsets)
3216		return 0;
3217
3218	if (ucount < umulti_link->cnt)
3219		err = -ENOSPC;
3220	else
3221		ucount = umulti_link->cnt;
3222
3223	for (i = 0; i < ucount; i++) {
3224		if (uoffsets &&
3225		    put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3226			return -EFAULT;
3227		if (uref_ctr_offsets &&
3228		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3229			return -EFAULT;
3230		if (ucookies &&
3231		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3232			return -EFAULT;
3233	}
3234
3235	return err;
3236}
3237
3238static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3239	.release = bpf_uprobe_multi_link_release,
3240	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3241	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3242};
3243
3244static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3245			   unsigned long entry_ip,
3246			   struct pt_regs *regs,
3247			   bool is_return, void *data)
3248{
3249	struct bpf_uprobe_multi_link *link = uprobe->link;
3250	struct bpf_uprobe_multi_run_ctx run_ctx = {
3251		.session_ctx = {
3252			.is_return = is_return,
3253			.data = data,
3254		},
3255		.entry_ip = entry_ip,
3256		.uprobe = uprobe,
3257	};
3258	struct bpf_prog *prog = link->link.prog;
3259	bool sleepable = prog->sleepable;
3260	struct bpf_run_ctx *old_run_ctx;
3261	int err;
3262
3263	if (link->task && !same_thread_group(current, link->task))
3264		return 0;
3265
3266	if (sleepable)
3267		rcu_read_lock_trace();
3268	else
3269		rcu_read_lock();
3270
3271	migrate_disable();
3272
3273	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
3274	err = bpf_prog_run(link->link.prog, regs);
3275	bpf_reset_run_ctx(old_run_ctx);
3276
3277	migrate_enable();
3278
3279	if (sleepable)
3280		rcu_read_unlock_trace();
3281	else
3282		rcu_read_unlock();
3283	return err;
3284}
3285
3286static bool
3287uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
3288{
3289	struct bpf_uprobe *uprobe;
3290
3291	uprobe = container_of(con, struct bpf_uprobe, consumer);
3292	return uprobe->link->task->mm == mm;
3293}
3294
3295static int
3296uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
3297			  __u64 *data)
3298{
3299	struct bpf_uprobe *uprobe;
3300	int ret;
3301
3302	uprobe = container_of(con, struct bpf_uprobe, consumer);
3303	ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data);
3304	if (uprobe->session)
3305		return ret ? UPROBE_HANDLER_IGNORE : 0;
3306	return 0;
3307}
3308
3309static int
3310uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
3311			      __u64 *data)
3312{
3313	struct bpf_uprobe *uprobe;
3314
3315	uprobe = container_of(con, struct bpf_uprobe, consumer);
3316	uprobe_prog_run(uprobe, func, regs, true, data);
3317	return 0;
3318}
3319
3320static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3321{
3322	struct bpf_uprobe_multi_run_ctx *run_ctx;
3323
3324	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3325			       session_ctx.run_ctx);
3326	return run_ctx->entry_ip;
3327}
3328
3329static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3330{
3331	struct bpf_uprobe_multi_run_ctx *run_ctx;
3332
3333	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3334			       session_ctx.run_ctx);
3335	return run_ctx->uprobe->cookie;
3336}
3337
3338int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3339{
3340	struct bpf_uprobe_multi_link *link = NULL;
3341	unsigned long __user *uref_ctr_offsets;
3342	struct bpf_link_primer link_primer;
3343	struct bpf_uprobe *uprobes = NULL;
3344	struct task_struct *task = NULL;
3345	unsigned long __user *uoffsets;
3346	u64 __user *ucookies;
3347	void __user *upath;
3348	u32 flags, cnt, i;
3349	struct path path;
3350	char *name;
3351	pid_t pid;
3352	int err;
3353
3354	/* no support for 32bit archs yet */
3355	if (sizeof(u64) != sizeof(void *))
3356		return -EOPNOTSUPP;
3357
3358	if (!is_uprobe_multi(prog))
3359		return -EINVAL;
3360
3361	flags = attr->link_create.uprobe_multi.flags;
3362	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3363		return -EINVAL;
3364
3365	/*
3366	 * path, offsets and cnt are mandatory;
3367	 * ref_ctr_offsets and cookies are optional
3368	 */
3369	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3370	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3371	cnt = attr->link_create.uprobe_multi.cnt;
3372	pid = attr->link_create.uprobe_multi.pid;
3373
3374	if (!upath || !uoffsets || !cnt || pid < 0)
3375		return -EINVAL;
3376	if (cnt > MAX_UPROBE_MULTI_CNT)
3377		return -E2BIG;
3378
3379	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3380	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3381
3382	name = strndup_user(upath, PATH_MAX);
3383	if (IS_ERR(name)) {
3384		err = PTR_ERR(name);
3385		return err;
3386	}
3387
3388	err = kern_path(name, LOOKUP_FOLLOW, &path);
3389	kfree(name);
3390	if (err)
3391		return err;
3392
3393	if (!d_is_reg(path.dentry)) {
3394		err = -EBADF;
3395		goto error_path_put;
3396	}
3397
3398	if (pid) {
3399		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
3400		if (!task) {
3401			err = -ESRCH;
3402			goto error_path_put;
3403		}
3404	}
3405
3406	err = -ENOMEM;
3407
3408	link = kzalloc(sizeof(*link), GFP_KERNEL);
3409	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3410
3411	if (!uprobes || !link)
3412		goto error_free;
3413
3414	for (i = 0; i < cnt; i++) {
3415		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3416			err = -EFAULT;
3417			goto error_free;
3418		}
3419		if (uprobes[i].offset < 0) {
3420			err = -EINVAL;
3421			goto error_free;
3422		}
3423		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3424			err = -EFAULT;
3425			goto error_free;
3426		}
3427		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3428			err = -EFAULT;
3429			goto error_free;
3430		}
3431
3432		uprobes[i].link = link;
3433
3434		if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
3435			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3436		if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
3437			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3438		if (is_uprobe_session(prog))
3439			uprobes[i].session = true;
3440		if (pid)
3441			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3442	}
3443
3444	link->cnt = cnt;
3445	link->uprobes = uprobes;
3446	link->path = path;
3447	link->task = task;
3448	link->flags = flags;
3449
3450	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3451		      &bpf_uprobe_multi_link_lops, prog);
3452
3453	for (i = 0; i < cnt; i++) {
3454		uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
3455						    uprobes[i].offset,
3456						    uprobes[i].ref_ctr_offset,
3457						    &uprobes[i].consumer);
3458		if (IS_ERR(uprobes[i].uprobe)) {
3459			err = PTR_ERR(uprobes[i].uprobe);
3460			link->cnt = i;
3461			goto error_unregister;
3462		}
3463	}
3464
3465	err = bpf_link_prime(&link->link, &link_primer);
3466	if (err)
3467		goto error_unregister;
3468
3469	return bpf_link_settle(&link_primer);
3470
3471error_unregister:
3472	bpf_uprobe_unregister(uprobes, link->cnt);
3473
3474error_free:
3475	kvfree(uprobes);
3476	kfree(link);
3477	if (task)
3478		put_task_struct(task);
3479error_path_put:
3480	path_put(&path);
3481	return err;
3482}
3483#else /* !CONFIG_UPROBES */
3484int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3485{
3486	return -EOPNOTSUPP;
3487}
3488static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3489{
3490	return 0;
3491}
3492static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3493{
3494	return 0;
3495}
3496#endif /* CONFIG_UPROBES */
3497
3498__bpf_kfunc_start_defs();
3499
3500__bpf_kfunc bool bpf_session_is_return(void)
3501{
3502	struct bpf_session_run_ctx *session_ctx;
3503
3504	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3505	return session_ctx->is_return;
3506}
3507
3508__bpf_kfunc __u64 *bpf_session_cookie(void)
3509{
3510	struct bpf_session_run_ctx *session_ctx;
3511
3512	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3513	return session_ctx->data;
3514}
3515
3516__bpf_kfunc_end_defs();
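/*
 * A kprobe.session program uses these kfuncs roughly as follows (sketch;
 * the cookie slot is the per-invocation u64 set up via fp.entry_data_size
 * for kprobe sessions above):
 *
 *	__u64 *cookie = bpf_session_cookie();
 *
 *	if (!bpf_session_is_return()) {
 *		*cookie = bpf_ktime_get_ns();
 *	} else {
 *		__u64 latency = bpf_ktime_get_ns() - *cookie;
 *		...
 *	}
 */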
3517
3518BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
3519BTF_ID_FLAGS(func, bpf_session_is_return)
3520BTF_ID_FLAGS(func, bpf_session_cookie)
3521BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
3522
3523static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
3524{
3525	if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
3526		return 0;
3527
3528	if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
3529		return -EACCES;
3530
3531	return 0;
3532}
3533
3534static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
3535	.owner = THIS_MODULE,
3536	.set = &kprobe_multi_kfunc_set_ids,
3537	.filter = bpf_kprobe_multi_filter,
3538};
3539
3540static int __init bpf_kprobe_multi_kfuncs_init(void)
3541{
3542	return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
3543}
3544
3545late_initcall(bpf_kprobe_multi_kfuncs_init);
3546
3547__bpf_kfunc_start_defs();
3548
3549__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
3550				     u64 value)
3551{
3552	if (type != PIDTYPE_PID && type != PIDTYPE_TGID)
3553		return -EINVAL;
3554
3555	return bpf_send_signal_common(sig, type, task, value);
3556}
3557
3558__bpf_kfunc_end_defs();
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/kernel.h>
   6#include <linux/types.h>
   7#include <linux/slab.h>
   8#include <linux/bpf.h>
 
   9#include <linux/bpf_perf_event.h>
 
  10#include <linux/filter.h>
  11#include <linux/uaccess.h>
  12#include <linux/ctype.h>
  13#include <linux/kprobes.h>
  14#include <linux/spinlock.h>
  15#include <linux/syscalls.h>
  16#include <linux/error-injection.h>
  17#include <linux/btf_ids.h>
 
 
 
 
 
 
 
 
 
 
 
 
  18
  19#include <asm/tlb.h>
  20
  21#include "trace_probe.h"
  22#include "trace.h"
  23
  24#define CREATE_TRACE_POINTS
  25#include "bpf_trace.h"
  26
  27#define bpf_event_rcu_dereference(p)					\
  28	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  29
 
 
 
  30#ifdef CONFIG_MODULES
  31struct bpf_trace_module {
  32	struct module *module;
  33	struct list_head list;
  34};
  35
  36static LIST_HEAD(bpf_trace_modules);
  37static DEFINE_MUTEX(bpf_module_mutex);
  38
  39static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  40{
  41	struct bpf_raw_event_map *btp, *ret = NULL;
  42	struct bpf_trace_module *btm;
  43	unsigned int i;
  44
  45	mutex_lock(&bpf_module_mutex);
  46	list_for_each_entry(btm, &bpf_trace_modules, list) {
  47		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  48			btp = &btm->module->bpf_raw_events[i];
  49			if (!strcmp(btp->tp->name, name)) {
  50				if (try_module_get(btm->module))
  51					ret = btp;
  52				goto out;
  53			}
  54		}
  55	}
  56out:
  57	mutex_unlock(&bpf_module_mutex);
  58	return ret;
  59}
  60#else
  61static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  62{
  63	return NULL;
  64}
  65#endif /* CONFIG_MODULES */
  66
  67u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  68u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  69
 
 
 
 
 
 
 
 
 
  70/**
  71 * trace_call_bpf - invoke BPF program
  72 * @call: tracepoint event
  73 * @ctx: opaque context pointer
  74 *
  75 * kprobe handlers execute BPF programs via this helper.
  76 * Can be used from static tracepoints in the future.
  77 *
  78 * Return: BPF programs always return an integer which is interpreted by
  79 * kprobe handler as:
  80 * 0 - return from kprobe (event is filtered out)
  81 * 1 - store kprobe event into ring buffer
  82 * Other values are reserved and currently alias to 1
  83 */
  84unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
  85{
  86	unsigned int ret;
  87
  88	if (in_nmi()) /* not supported yet */
  89		return 1;
  90
  91	cant_sleep();
  92
  93	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
  94		/*
  95		 * since some bpf program is already running on this cpu,
  96		 * don't call into another bpf program (same or different)
  97		 * and don't send kprobe event into ring-buffer,
  98		 * so return zero here
  99		 */
 
 
 
 100		ret = 0;
 101		goto out;
 102	}
 103
 104	/*
 105	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
 106	 * to all call sites, we did a bpf_prog_array_valid() there to check
 107	 * whether call->prog_array is empty or not, which is
 108	 * a heurisitc to speed up execution.
 109	 *
 110	 * If bpf_prog_array_valid() fetched prog_array was
 111	 * non-NULL, we go into trace_call_bpf() and do the actual
 112	 * proper rcu_dereference() under RCU lock.
 113	 * If it turns out that prog_array is NULL then, we bail out.
 114	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
 115	 * was NULL, you'll skip the prog_array with the risk of missing
 116	 * out of events when it was updated in between this and the
 117	 * rcu_dereference() which is accepted risk.
 118	 */
 119	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
 
 
 
 120
 121 out:
 122	__this_cpu_dec(bpf_prog_active);
 123
 124	return ret;
 125}
 126
 127#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 128BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 129{
 130	regs_set_return_value(regs, rc);
 131	override_function_with_return(regs);
 132	return 0;
 133}
 134
 135static const struct bpf_func_proto bpf_override_return_proto = {
 136	.func		= bpf_override_return,
 137	.gpl_only	= true,
 138	.ret_type	= RET_INTEGER,
 139	.arg1_type	= ARG_PTR_TO_CTX,
 140	.arg2_type	= ARG_ANYTHING,
 141};
 142#endif
 143
 144static __always_inline int
 145bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 146{
 147	int ret;
 148
 149	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
 150	if (unlikely(ret < 0))
 151		memset(dst, 0, size);
 152	return ret;
 153}
 154
 155BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
 156	   const void __user *, unsafe_ptr)
 157{
 158	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
 159}
 160
 161const struct bpf_func_proto bpf_probe_read_user_proto = {
 162	.func		= bpf_probe_read_user,
 163	.gpl_only	= true,
 164	.ret_type	= RET_INTEGER,
 165	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 166	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 167	.arg3_type	= ARG_ANYTHING,
 168};
 169
 170static __always_inline int
 171bpf_probe_read_user_str_common(void *dst, u32 size,
 172			       const void __user *unsafe_ptr)
 173{
 174	int ret;
 175
 176	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
 177	if (unlikely(ret < 0))
 178		memset(dst, 0, size);
 179	return ret;
 180}
 181
 182BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
 183	   const void __user *, unsafe_ptr)
 184{
 185	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
 186}
 187
 188const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 189	.func		= bpf_probe_read_user_str,
 190	.gpl_only	= true,
 191	.ret_type	= RET_INTEGER,
 192	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 193	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 194	.arg3_type	= ARG_ANYTHING,
 195};
 196
 197static __always_inline int
 198bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 199{
 200	int ret = security_locked_down(LOCKDOWN_BPF_READ);
 201
 202	if (unlikely(ret < 0))
 203		goto fail;
 204	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
 205	if (unlikely(ret < 0))
 206		goto fail;
 207	return ret;
 208fail:
 209	memset(dst, 0, size);
 210	return ret;
 211}
 212
 213BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
 214	   const void *, unsafe_ptr)
 215{
 216	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 217}
 218
 219const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 220	.func		= bpf_probe_read_kernel,
 221	.gpl_only	= true,
 222	.ret_type	= RET_INTEGER,
 223	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 224	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 225	.arg3_type	= ARG_ANYTHING,
 226};
 227
 228static __always_inline int
 229bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 230{
 231	int ret = security_locked_down(LOCKDOWN_BPF_READ);
 232
 233	if (unlikely(ret < 0))
 234		goto fail;
 235
 236	/*
 237	 * The strncpy_from_kernel_nofault() call will likely not fill the
 238	 * entire buffer, but that's okay in this circumstance as we're probing
 239	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
 240	 * as well probe the stack. Thus, memory is explicitly cleared
 241	 * only in the error case, so that careless users who ignore the
 242	 * return code don't copy garbage; otherwise the length of the string
 243	 * is returned, which can be used for bpf_perf_event_output() et al.
 244	 */
 245	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
 246	if (unlikely(ret < 0))
 247		goto fail;
 248
 249	return ret;
 250fail:
 251	memset(dst, 0, size);
 252	return ret;
 253}
 254
 255BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
 256	   const void *, unsafe_ptr)
 257{
 258	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 259}
 260
 261const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
 262	.func		= bpf_probe_read_kernel_str,
 263	.gpl_only	= true,
 264	.ret_type	= RET_INTEGER,
 265	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 266	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 267	.arg3_type	= ARG_ANYTHING,
 268};
 269
 270#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
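/*
 * Legacy bpf_probe_read()/bpf_probe_read_str() compat helpers: the address
 * space is guessed from the pointer value, treating anything below TASK_SIZE
 * as a user address and everything else as a kernel address. That guess is
 * only meaningful on architectures where user and kernel address ranges do
 * not overlap, hence the CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE guard
 * above.
 */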
 271BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
 272	   const void *, unsafe_ptr)
 273{
 274	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 275		return bpf_probe_read_user_common(dst, size,
 276				(__force void __user *)unsafe_ptr);
 277	}
 278	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 279}
 280
 281static const struct bpf_func_proto bpf_probe_read_compat_proto = {
 282	.func		= bpf_probe_read_compat,
 283	.gpl_only	= true,
 284	.ret_type	= RET_INTEGER,
 285	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 286	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 287	.arg3_type	= ARG_ANYTHING,
 288};
 289
 290BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
 291	   const void *, unsafe_ptr)
 292{
 293	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 294		return bpf_probe_read_user_str_common(dst, size,
 295				(__force void __user *)unsafe_ptr);
 296	}
 297	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 298}
 299
 300static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
 301	.func		= bpf_probe_read_compat_str,
 302	.gpl_only	= true,
 303	.ret_type	= RET_INTEGER,
 304	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 305	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 306	.arg3_type	= ARG_ANYTHING,
 307};
 308#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 309
 310BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
 311	   u32, size)
 312{
 313	/*
 314	 * Ensure we're in user context which is safe for the helper to
 315	 * run. This helper has no business in a kthread.
 316	 *
 317	 * access_ok() should prevent writing to non-user memory, but in
 318	 * some situations (nommu, temporary switch, etc) access_ok() does
 319	 * not provide enough validation, hence the check on KERNEL_DS.
 320	 *
 321	 * nmi_uaccess_okay() ensures the probe is not run in an interim
 322	 * state, when the task or mm are switched. This is specifically
 323	 * required to prevent the use of temporary mm.
 324	 */
 325
 326	if (unlikely(in_interrupt() ||
 327		     current->flags & (PF_KTHREAD | PF_EXITING)))
 328		return -EPERM;
 329	if (unlikely(uaccess_kernel()))
 330		return -EPERM;
 331	if (unlikely(!nmi_uaccess_okay()))
 332		return -EPERM;
 333
 334	return copy_to_user_nofault(unsafe_ptr, src, size);
 335}
 336
 337static const struct bpf_func_proto bpf_probe_write_user_proto = {
 338	.func		= bpf_probe_write_user,
 339	.gpl_only	= true,
 340	.ret_type	= RET_INTEGER,
 341	.arg1_type	= ARG_ANYTHING,
 342	.arg2_type	= ARG_PTR_TO_MEM,
 343	.arg3_type	= ARG_CONST_SIZE,
 344};
 345
 346static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 347{
 348	if (!capable(CAP_SYS_ADMIN))
 349		return NULL;
 350
 351	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 352			    current->comm, task_pid_nr(current));
 353
 354	return &bpf_probe_write_user_proto;
 355}
 356
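/*
 * Copy the string behind unsafe_ptr into buf for a "%s", "%pks" or "%pus"
 * specifier: 'k' reads kernel memory, 'u' reads user memory, and a plain
 * 's' falls back to the TASK_SIZE-based guess above on architectures with
 * non-overlapping address spaces, otherwise it reads kernel memory.
 */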
 357static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
 358		size_t bufsz)
 359{
 360	void __user *user_ptr = (__force void __user *)unsafe_ptr;
 361
 362	buf[0] = 0;
 363
 364	switch (fmt_ptype) {
 365	case 's':
 366#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 367		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 368			strncpy_from_user_nofault(buf, user_ptr, bufsz);
 369			break;
 370		}
 371		fallthrough;
 372#endif
 373	case 'k':
 374		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
 375		break;
 376	case 'u':
 377		strncpy_from_user_nofault(buf, user_ptr, bufsz);
 378		break;
 379	}
 380}
 381
 382static DEFINE_RAW_SPINLOCK(trace_printk_lock);
 383
 384#define BPF_TRACE_PRINTK_SIZE   1024
 385
 386static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
 387{
 388	static char buf[BPF_TRACE_PRINTK_SIZE];
 389	unsigned long flags;
 390	va_list ap;
 391	int ret;
 392
 393	raw_spin_lock_irqsave(&trace_printk_lock, flags);
 394	va_start(ap, fmt);
 395	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
 396	va_end(ap);
 397	/* vsnprintf() will not append null for zero-length strings */
 398	if (ret == 0)
 399		buf[0] = '\0';
 400	trace_bpf_trace_printk(buf);
 401	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 402
 403	return ret;
 404}
 405
 406/*
 407 * Only limited trace_printk() conversion specifiers allowed:
 408 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 409 */
 410BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 411	   u64, arg2, u64, arg3)
 412{
 413	int i, mod[3] = {}, fmt_cnt = 0;
 414	char buf[64], fmt_ptype;
 415	void *unsafe_ptr = NULL;
 416	bool str_seen = false;
 417
 418	/*
 419	 * bpf_check()->check_func_arg()->check_stack_boundary()
 420	 * guarantees that fmt points to bpf program stack,
 421	 * fmt_size bytes of it were initialized and fmt_size > 0
 422	 */
 423	if (fmt[--fmt_size] != 0)
 424		return -EINVAL;
 425
 426	/* check format string for allowed specifiers */
 427	for (i = 0; i < fmt_size; i++) {
 428		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
 429			return -EINVAL;
 430
 431		if (fmt[i] != '%')
 432			continue;
 433
 434		if (fmt_cnt >= 3)
 435			return -EINVAL;
 436
 437		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
 438		i++;
 439		if (fmt[i] == 'l') {
 440			mod[fmt_cnt]++;
 441			i++;
 442		} else if (fmt[i] == 'p') {
 443			mod[fmt_cnt]++;
 444			if ((fmt[i + 1] == 'k' ||
 445			     fmt[i + 1] == 'u') &&
 446			    fmt[i + 2] == 's') {
 447				fmt_ptype = fmt[i + 1];
 448				i += 2;
 449				goto fmt_str;
 450			}
 451
 452			if (fmt[i + 1] == 'B') {
 453				i++;
 454				goto fmt_next;
 455			}
 456
 457			/* disallow any further format extensions */
 458			if (fmt[i + 1] != 0 &&
 459			    !isspace(fmt[i + 1]) &&
 460			    !ispunct(fmt[i + 1]))
 461				return -EINVAL;
 462
 463			goto fmt_next;
 464		} else if (fmt[i] == 's') {
 465			mod[fmt_cnt]++;
 466			fmt_ptype = fmt[i];
 467fmt_str:
 468			if (str_seen)
 469				/* allow only one '%s' per fmt string */
 470				return -EINVAL;
 471			str_seen = true;
 472
 473			if (fmt[i + 1] != 0 &&
 474			    !isspace(fmt[i + 1]) &&
 475			    !ispunct(fmt[i + 1]))
 476				return -EINVAL;
 477
 478			switch (fmt_cnt) {
 479			case 0:
 480				unsafe_ptr = (void *)(long)arg1;
 481				arg1 = (long)buf;
 482				break;
 483			case 1:
 484				unsafe_ptr = (void *)(long)arg2;
 485				arg2 = (long)buf;
 486				break;
 487			case 2:
 488				unsafe_ptr = (void *)(long)arg3;
 489				arg3 = (long)buf;
 490				break;
 491			}
 492
 493			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
 494					sizeof(buf));
 495			goto fmt_next;
 496		}
 497
 498		if (fmt[i] == 'l') {
 499			mod[fmt_cnt]++;
 500			i++;
 501		}
 502
 503		if (fmt[i] != 'i' && fmt[i] != 'd' &&
 504		    fmt[i] != 'u' && fmt[i] != 'x')
 505			return -EINVAL;
 506fmt_next:
 507		fmt_cnt++;
 508	}
 509
 510/* Horrid workaround for getting va_list handling working with different
 511 * argument type combinations generically for 32 and 64 bit archs.
 512 */
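/*
 * __BPF_TP_EMIT() expands through __BPF_ARG3_TP() -> __BPF_ARG2_TP() ->
 * __BPF_ARG1_TP() -> __BPF_TP(): each step picks, based on mod[n], whether
 * argN is passed through as a 64-bit value, cast to long, or cast to u32,
 * so that a single bpf_do_trace_printk(fmt, arg1, arg2, arg3) call is
 * emitted with varargs of the sizes the format string expects.
 */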
 513#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
 514#define __BPF_TP(...)							\
 515	bpf_do_trace_printk(fmt, ##__VA_ARGS__)
 516
 517#define __BPF_ARG1_TP(...)						\
 518	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
 519	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
 520	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
 521	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
 522	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
 523
 524#define __BPF_ARG2_TP(...)						\
 525	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
 526	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
 527	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
 528	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
 529	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
 530
 531#define __BPF_ARG3_TP(...)						\
 532	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
 533	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
 534	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
 535	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
 536	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
 537
 538	return __BPF_TP_EMIT();
 539}
 540
 541static const struct bpf_func_proto bpf_trace_printk_proto = {
 542	.func		= bpf_trace_printk,
 543	.gpl_only	= true,
 544	.ret_type	= RET_INTEGER,
 545	.arg1_type	= ARG_PTR_TO_MEM,
 546	.arg2_type	= ARG_CONST_SIZE,
 547};
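/*
 * Typical BPF-program-side usage looks roughly like:
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 *
 * with the output emitted through the bpf_trace_printk trace event enabled
 * below and visible in the ftrace ring buffer (e.g. trace_pipe).
 */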
 548
 549const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 550{
 551	/*
 552	 * This program might be calling bpf_trace_printk,
 553	 * so enable the associated bpf_trace/bpf_trace_printk event.
 554	 * Repeat this each time as it is possible a user has
 555	 * disabled bpf_trace_printk events.  By loading a program
 556	 * calling bpf_trace_printk() however the user has expressed
 557	 * the intent to see such events.
 558	 */
 559	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
 560		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 
 561
 562	return &bpf_trace_printk_proto;
 563}
 564
 565#define MAX_SEQ_PRINTF_VARARGS		12
 566#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
 567#define MAX_SEQ_PRINTF_STR_LEN		128
 568
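/*
 * Per-cpu scratch buffers for the string and IP-address arguments of
 * bpf_seq_printf(); bpf_seq_printf_buf_used acts as a recursion guard so
 * that a nested invocation on the same CPU bails out with -EBUSY instead
 * of clobbering the buffers.
 */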
 569struct bpf_seq_printf_buf {
 570	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
 571};
 572static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
 573static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);
 574
 575BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 576	   const void *, data, u32, data_len)
 577{
 578	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
 579	int i, buf_used, copy_size, num_args;
 580	u64 params[MAX_SEQ_PRINTF_VARARGS];
 581	struct bpf_seq_printf_buf *bufs;
 582	const u64 *args = data;
 583
 584	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
 585	if (WARN_ON_ONCE(buf_used > 1)) {
 586		err = -EBUSY;
 587		goto out;
 588	}
 589
 590	bufs = this_cpu_ptr(&bpf_seq_printf_buf);
 591
 592	/*
 593	 * bpf_check()->check_func_arg()->check_stack_boundary()
 594	 * guarantees that fmt points to bpf program stack,
 595	 * fmt_size bytes of it were initialized and fmt_size > 0
 596	 */
 597	if (fmt[--fmt_size] != 0)
 598		goto out;
 599
 600	if (data_len & 7)
 601		goto out;
 602
 603	for (i = 0; i < fmt_size; i++) {
 604		if (fmt[i] == '%') {
 605			if (fmt[i + 1] == '%')
 606				i++;
 607			else if (!data || !data_len)
 608				goto out;
 609		}
 610	}
 611
 612	num_args = data_len / 8;
 613
 614	/* check format string for allowed specifiers */
 615	for (i = 0; i < fmt_size; i++) {
 616		/* only printable ascii for now. */
 617		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
 618			err = -EINVAL;
 619			goto out;
 620		}
 621
 622		if (fmt[i] != '%')
 623			continue;
 624
 625		if (fmt[i + 1] == '%') {
 626			i++;
 627			continue;
 628		}
 629
 630		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
 631			err = -E2BIG;
 632			goto out;
 633		}
 634
 635		if (fmt_cnt >= num_args) {
 636			err = -EINVAL;
 637			goto out;
 638		}
 639
 640		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
 641		i++;
 642
 643		/* skip optional "[0 +-][num]" width formatting field */
 644		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
 645		       fmt[i] == ' ')
 646			i++;
 647		if (fmt[i] >= '1' && fmt[i] <= '9') {
 648			i++;
 649			while (fmt[i] >= '0' && fmt[i] <= '9')
 650				i++;
 651		}
 652
 653		if (fmt[i] == 's') {
 654			void *unsafe_ptr;
 655
 656			/* try our best to copy */
 657			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
 658				err = -E2BIG;
 659				goto out;
 660			}
 661
 662			unsafe_ptr = (void *)(long)args[fmt_cnt];
 663			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
 664					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
 665			if (err < 0)
 666				bufs->buf[memcpy_cnt][0] = '\0';
 667			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
 
 668
 669			fmt_cnt++;
 670			memcpy_cnt++;
 671			continue;
 672		}
 673
 674		if (fmt[i] == 'p') {
 675			if (fmt[i + 1] == 0 ||
 676			    fmt[i + 1] == 'K' ||
 677			    fmt[i + 1] == 'x' ||
 678			    fmt[i + 1] == 'B') {
 679				/* just kernel pointers */
 680				params[fmt_cnt] = args[fmt_cnt];
 681				fmt_cnt++;
 682				continue;
 683			}
 684
 685			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
 686			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
 687				err = -EINVAL;
 688				goto out;
 689			}
 690			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
 691				err = -EINVAL;
 692				goto out;
 693			}
 694
 695			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
 696				err = -E2BIG;
 697				goto out;
 698			}
 699
 700
 701			copy_size = (fmt[i + 2] == '4') ? 4 : 16;
 702
 703			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
 704						(void *) (long) args[fmt_cnt],
 705						copy_size);
 706			if (err < 0)
 707				memset(bufs->buf[memcpy_cnt], 0, copy_size);
 708			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
 709
 710			i += 2;
 711			fmt_cnt++;
 712			memcpy_cnt++;
 713			continue;
 714		}
 715
 716		if (fmt[i] == 'l') {
 717			i++;
 718			if (fmt[i] == 'l')
 719				i++;
 720		}
 721
 722		if (fmt[i] != 'i' && fmt[i] != 'd' &&
 723		    fmt[i] != 'u' && fmt[i] != 'x' &&
 724		    fmt[i] != 'X') {
 725			err = -EINVAL;
 726			goto out;
 727		}
 728
 729		params[fmt_cnt] = args[fmt_cnt];
 730		fmt_cnt++;
 731	}
 732
 733	/* At most MAX_SEQ_PRINTF_VARARGS parameters can be passed; just give
 734	 * all of them to seq_printf().
 735	 */
 736	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
 737		   params[4], params[5], params[6], params[7], params[8],
 738		   params[9], params[10], params[11]);
 739
 740	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
 741out:
 742	this_cpu_dec(bpf_seq_printf_buf_used);
 743	return err;
 744}
 745
 746BTF_ID_LIST(bpf_seq_printf_btf_ids)
 747BTF_ID(struct, seq_file)
 748
 749static const struct bpf_func_proto bpf_seq_printf_proto = {
 750	.func		= bpf_seq_printf,
 751	.gpl_only	= true,
 752	.ret_type	= RET_INTEGER,
 753	.arg1_type	= ARG_PTR_TO_BTF_ID,
 754	.arg2_type	= ARG_PTR_TO_MEM,
 
 755	.arg3_type	= ARG_CONST_SIZE,
 756	.arg4_type      = ARG_PTR_TO_MEM_OR_NULL,
 757	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 758	.btf_id		= bpf_seq_printf_btf_ids,
 759};
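/*
 * From a BPF_TRACE_ITER program the variadic values are passed as an array
 * of u64, e.g. roughly:
 *
 *	u64 args[] = { (u64)task->pid, (u64)(long)task->comm };
 *	bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
 *
 * data_len must be a multiple of 8, matching the "data_len & 7" check above.
 */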
 760
 761BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
 762{
 763	return seq_write(m, data, len) ? -EOVERFLOW : 0;
 764}
 765
 766BTF_ID_LIST(bpf_seq_write_btf_ids)
 767BTF_ID(struct, seq_file)
 768
 769static const struct bpf_func_proto bpf_seq_write_proto = {
 770	.func		= bpf_seq_write,
 771	.gpl_only	= true,
 772	.ret_type	= RET_INTEGER,
 773	.arg1_type	= ARG_PTR_TO_BTF_ID,
 774	.arg2_type	= ARG_PTR_TO_MEM,
 775	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 776	.btf_id		= bpf_seq_write_btf_ids,
 777};
 778
 779static __always_inline int
 780get_map_perf_counter(struct bpf_map *map, u64 flags,
 781		     u64 *value, u64 *enabled, u64 *running)
 782{
 783	struct bpf_array *array = container_of(map, struct bpf_array, map);
 784	unsigned int cpu = smp_processor_id();
 785	u64 index = flags & BPF_F_INDEX_MASK;
 786	struct bpf_event_entry *ee;
 787
 788	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 789		return -EINVAL;
 790	if (index == BPF_F_CURRENT_CPU)
 791		index = cpu;
 792	if (unlikely(index >= array->map.max_entries))
 793		return -E2BIG;
 794
 795	ee = READ_ONCE(array->ptrs[index]);
 796	if (!ee)
 797		return -ENOENT;
 798
 799	return perf_event_read_local(ee->event, value, enabled, running);
 800}
 801
 802BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 803{
 804	u64 value = 0;
 805	int err;
 806
 807	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 808	/*
 809	 * this api is ugly since we miss [-22..-2] range of valid
 810	 * counter values, but that's uapi
 811	 */
 812	if (err)
 813		return err;
 814	return value;
 815}
 816
 817static const struct bpf_func_proto bpf_perf_event_read_proto = {
 818	.func		= bpf_perf_event_read,
 819	.gpl_only	= true,
 820	.ret_type	= RET_INTEGER,
 821	.arg1_type	= ARG_CONST_MAP_PTR,
 822	.arg2_type	= ARG_ANYTHING,
 823};
 824
 825BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 826	   struct bpf_perf_event_value *, buf, u32, size)
 827{
 828	int err = -EINVAL;
 829
 830	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 831		goto clear;
 832	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 833				   &buf->running);
 834	if (unlikely(err))
 835		goto clear;
 836	return 0;
 837clear:
 838	memset(buf, 0, size);
 839	return err;
 840}
 841
 842static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 843	.func		= bpf_perf_event_read_value,
 844	.gpl_only	= true,
 845	.ret_type	= RET_INTEGER,
 846	.arg1_type	= ARG_CONST_MAP_PTR,
 847	.arg2_type	= ARG_ANYTHING,
 848	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
 849	.arg4_type	= ARG_CONST_SIZE,
 850};
 851
 852static __always_inline u64
 853__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 854			u64 flags, struct perf_sample_data *sd)
 
 855{
 856	struct bpf_array *array = container_of(map, struct bpf_array, map);
 857	unsigned int cpu = smp_processor_id();
 858	u64 index = flags & BPF_F_INDEX_MASK;
 859	struct bpf_event_entry *ee;
 860	struct perf_event *event;
 861
 862	if (index == BPF_F_CURRENT_CPU)
 863		index = cpu;
 864	if (unlikely(index >= array->map.max_entries))
 865		return -E2BIG;
 866
 867	ee = READ_ONCE(array->ptrs[index]);
 868	if (!ee)
 869		return -ENOENT;
 870
 871	event = ee->event;
 872	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 873		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 874		return -EINVAL;
 875
 876	if (unlikely(event->oncpu != cpu))
 877		return -EOPNOTSUPP;
 878
 879	return perf_event_output(event, sd, regs);
 880}
 881
 882/*
 883 * Support tracepoints firing in normal, irq and nmi context, each of which
 884 * may call bpf_perf_event_output()
 885 */
 886struct bpf_trace_sample_data {
 887	struct perf_sample_data sds[3];
 888};
 889
 890static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
 891static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 892BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 893	   u64, flags, void *, data, u64, size)
 894{
 895	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
 896	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 897	struct perf_raw_record raw = {
 898		.frag = {
 899			.size = size,
 900			.data = data,
 901		},
 902	};
 903	struct perf_sample_data *sd;
 904	int err;
 905
 906	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 907		err = -EBUSY;
 908		goto out;
 909	}
 910
 911	sd = &sds->sds[nest_level - 1];
 912
 913	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
 914		err = -EINVAL;
 915		goto out;
 916	}
 917
 918	perf_sample_data_init(sd, 0, 0);
 919	sd->raw = &raw;
 920
 921	err = __bpf_perf_event_output(regs, map, flags, sd);
 922
 
 923out:
 924	this_cpu_dec(bpf_trace_nest_level);
 
 925	return err;
 926}
 927
 928static const struct bpf_func_proto bpf_perf_event_output_proto = {
 929	.func		= bpf_perf_event_output,
 930	.gpl_only	= true,
 931	.ret_type	= RET_INTEGER,
 932	.arg1_type	= ARG_PTR_TO_CTX,
 933	.arg2_type	= ARG_CONST_MAP_PTR,
 934	.arg3_type	= ARG_ANYTHING,
 935	.arg4_type	= ARG_PTR_TO_MEM,
 936	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 937};
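/*
 * The usual BPF-program-side pattern is to stream a struct into a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map on the current CPU, e.g. roughly:
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 *
 * which lands in the perf ring buffer of the event stored at the current
 * CPU's index of that map.
 */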
 938
 939static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
 940struct bpf_nested_pt_regs {
 941	struct pt_regs regs[3];
 942};
 943static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
 944static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 945
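/*
 * bpf_event_output() is the variant used by helpers such as bpf_skb_output()
 * and bpf_xdp_output(): the fixed-size meta data is the first perf_raw_frag
 * and the ctx payload is chained as a second frag that perf copies via
 * ctx_copy, while pt_regs are synthesized with perf_fetch_caller_regs()
 * since no tracepoint regs are available here.
 */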
 946u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 947		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 948{
 949	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 950	struct perf_raw_frag frag = {
 951		.copy		= ctx_copy,
 952		.size		= ctx_size,
 953		.data		= ctx,
 954	};
 955	struct perf_raw_record raw = {
 956		.frag = {
 957			{
 958				.next	= ctx_size ? &frag : NULL,
 959			},
 960			.size	= meta_size,
 961			.data	= meta,
 962		},
 963	};
 964	struct perf_sample_data *sd;
 965	struct pt_regs *regs;
 
 966	u64 ret;
 967
 968	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 969		ret = -EBUSY;
 970		goto out;
 971	}
 972	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
 973	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 974
 975	perf_fetch_caller_regs(regs);
 976	perf_sample_data_init(sd, 0, 0);
 977	sd->raw = &raw;
 978
 979	ret = __bpf_perf_event_output(regs, map, flags, sd);
 980out:
 981	this_cpu_dec(bpf_event_output_nest_level);
 
 982	return ret;
 983}
 984
 985BPF_CALL_0(bpf_get_current_task)
 986{
 987	return (long) current;
 988}
 989
 990const struct bpf_func_proto bpf_get_current_task_proto = {
 991	.func		= bpf_get_current_task,
 992	.gpl_only	= true,
 993	.ret_type	= RET_INTEGER,
 994};
 995
 996BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 997{
 998	struct bpf_array *array = container_of(map, struct bpf_array, map);
 999	struct cgroup *cgrp;
1000
1001	if (unlikely(idx >= array->map.max_entries))
1002		return -E2BIG;
1003
1004	cgrp = READ_ONCE(array->ptrs[idx]);
1005	if (unlikely(!cgrp))
1006		return -EAGAIN;
 
1007
1008	return task_under_cgroup_hierarchy(current, cgrp);
1009}
1010
1011static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
1012	.func           = bpf_current_task_under_cgroup,
1013	.gpl_only       = false,
1014	.ret_type       = RET_INTEGER,
1015	.arg1_type      = ARG_CONST_MAP_PTR,
1016	.arg2_type      = ARG_ANYTHING,
 
1017};
1018
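/*
 * When bpf_send_signal() runs in a context with IRQs disabled, delivering
 * the signal right away is not safe (the interrupted code may already hold
 * locks that signal delivery needs), so the request is deferred to a
 * per-cpu irq_work and delivered from there instead.
 */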
1019struct send_signal_irq_work {
1020	struct irq_work irq_work;
1021	struct task_struct *task;
1022	u32 sig;
1023	enum pid_type type;
1024};
1025
1026static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
1027
1028static void do_bpf_send_signal(struct irq_work *entry)
1029{
1030	struct send_signal_irq_work *work;
 
1031
1032	work = container_of(entry, struct send_signal_irq_work, irq_work);
1033	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
1034}
1035
1036static int bpf_send_signal_common(u32 sig, enum pid_type type)
1037{
1038	struct send_signal_irq_work *work = NULL;
1039
1040	/* Similar to bpf_probe_write_user, task needs to be
1041	 * in a sound condition and kernel memory access be
1042	 * permitted in order to send signal to the current
1043	 * task.
1044	 */
1045	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
1046		return -EPERM;
1047	if (unlikely(uaccess_kernel()))
1048		return -EPERM;
1049	if (unlikely(!nmi_uaccess_okay()))
 
1050		return -EPERM;
1051
1052	if (irqs_disabled()) {
1053		/* Do an early check on signal validity. Otherwise,
1054		 * the error is lost in deferred irq_work.
1055		 */
1056		if (unlikely(!valid_signal(sig)))
1057			return -EINVAL;
1058
1059		work = this_cpu_ptr(&send_signal_work);
1060		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
1061			return -EBUSY;
1062
1063		/* Add the current task, which is the target of the signal,
1064		 * to the irq_work. The current task may have changed by the
1065		 * time the queued irq_work runs.
1066		 */
1067		work->task = current;
1068		work->sig = sig;
1069		work->type = type;
1070		irq_work_queue(&work->irq_work);
1071		return 0;
1072	}
1073
1074	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
1075}
1076
1077BPF_CALL_1(bpf_send_signal, u32, sig)
1078{
1079	return bpf_send_signal_common(sig, PIDTYPE_TGID);
1080}
1081
1082static const struct bpf_func_proto bpf_send_signal_proto = {
1083	.func		= bpf_send_signal,
1084	.gpl_only	= false,
1085	.ret_type	= RET_INTEGER,
1086	.arg1_type	= ARG_ANYTHING,
1087};
1088
1089BPF_CALL_1(bpf_send_signal_thread, u32, sig)
1090{
1091	return bpf_send_signal_common(sig, PIDTYPE_PID);
1092}
1093
1094static const struct bpf_func_proto bpf_send_signal_thread_proto = {
1095	.func		= bpf_send_signal_thread,
1096	.gpl_only	= false,
1097	.ret_type	= RET_INTEGER,
1098	.arg1_type	= ARG_ANYTHING,
1099};
1100
1101const struct bpf_func_proto *
1102bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1103{
1104	switch (func_id) {
1105	case BPF_FUNC_map_lookup_elem:
1106		return &bpf_map_lookup_elem_proto;
1107	case BPF_FUNC_map_update_elem:
1108		return &bpf_map_update_elem_proto;
1109	case BPF_FUNC_map_delete_elem:
1110		return &bpf_map_delete_elem_proto;
1111	case BPF_FUNC_map_push_elem:
1112		return &bpf_map_push_elem_proto;
1113	case BPF_FUNC_map_pop_elem:
1114		return &bpf_map_pop_elem_proto;
1115	case BPF_FUNC_map_peek_elem:
1116		return &bpf_map_peek_elem_proto;
1117	case BPF_FUNC_ktime_get_ns:
1118		return &bpf_ktime_get_ns_proto;
1119	case BPF_FUNC_ktime_get_boot_ns:
1120		return &bpf_ktime_get_boot_ns_proto;
1121	case BPF_FUNC_tail_call:
1122		return &bpf_tail_call_proto;
1123	case BPF_FUNC_get_current_pid_tgid:
1124		return &bpf_get_current_pid_tgid_proto;
1125	case BPF_FUNC_get_current_task:
1126		return &bpf_get_current_task_proto;
1127	case BPF_FUNC_get_current_uid_gid:
1128		return &bpf_get_current_uid_gid_proto;
1129	case BPF_FUNC_get_current_comm:
1130		return &bpf_get_current_comm_proto;
1131	case BPF_FUNC_trace_printk:
1132		return bpf_get_trace_printk_proto();
1133	case BPF_FUNC_get_smp_processor_id:
1134		return &bpf_get_smp_processor_id_proto;
1135	case BPF_FUNC_get_numa_node_id:
1136		return &bpf_get_numa_node_id_proto;
1137	case BPF_FUNC_perf_event_read:
1138		return &bpf_perf_event_read_proto;
1139	case BPF_FUNC_probe_write_user:
1140		return bpf_get_probe_write_proto();
1141	case BPF_FUNC_current_task_under_cgroup:
1142		return &bpf_current_task_under_cgroup_proto;
1143	case BPF_FUNC_get_prandom_u32:
1144		return &bpf_get_prandom_u32_proto;
1145	case BPF_FUNC_probe_read_user:
1146		return &bpf_probe_read_user_proto;
1147	case BPF_FUNC_probe_read_kernel:
1148		return &bpf_probe_read_kernel_proto;
 
1149	case BPF_FUNC_probe_read_user_str:
1150		return &bpf_probe_read_user_str_proto;
1151	case BPF_FUNC_probe_read_kernel_str:
1152		return &bpf_probe_read_kernel_str_proto;
 
1153#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1154	case BPF_FUNC_probe_read:
1155		return &bpf_probe_read_compat_proto;
 
1156	case BPF_FUNC_probe_read_str:
1157		return &bpf_probe_read_compat_str_proto;
 
1158#endif
1159#ifdef CONFIG_CGROUPS
1160	case BPF_FUNC_get_current_cgroup_id:
1161		return &bpf_get_current_cgroup_id_proto;
1162#endif
1163	case BPF_FUNC_send_signal:
1164		return &bpf_send_signal_proto;
1165	case BPF_FUNC_send_signal_thread:
1166		return &bpf_send_signal_thread_proto;
1167	case BPF_FUNC_perf_event_read_value:
1168		return &bpf_perf_event_read_value_proto;
1169	case BPF_FUNC_get_ns_current_pid_tgid:
1170		return &bpf_get_ns_current_pid_tgid_proto;
1171	case BPF_FUNC_ringbuf_output:
1172		return &bpf_ringbuf_output_proto;
1173	case BPF_FUNC_ringbuf_reserve:
1174		return &bpf_ringbuf_reserve_proto;
1175	case BPF_FUNC_ringbuf_submit:
1176		return &bpf_ringbuf_submit_proto;
1177	case BPF_FUNC_ringbuf_discard:
1178		return &bpf_ringbuf_discard_proto;
1179	case BPF_FUNC_ringbuf_query:
1180		return &bpf_ringbuf_query_proto;
1181	case BPF_FUNC_jiffies64:
1182		return &bpf_jiffies64_proto;
1183	case BPF_FUNC_get_task_stack:
1184		return &bpf_get_task_stack_proto;
1185	default:
1186		return NULL;
1187	}
1188}
1189
1190static const struct bpf_func_proto *
1191kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1192{
1193	switch (func_id) {
1194	case BPF_FUNC_perf_event_output:
1195		return &bpf_perf_event_output_proto;
1196	case BPF_FUNC_get_stackid:
1197		return &bpf_get_stackid_proto;
1198	case BPF_FUNC_get_stack:
1199		return &bpf_get_stack_proto;
1200#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1201	case BPF_FUNC_override_return:
1202		return &bpf_override_return_proto;
1203#endif
1204	default:
1205		return bpf_tracing_func_proto(func_id, prog);
1206	}
1207}
1208
1209/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1210static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1211					const struct bpf_prog *prog,
1212					struct bpf_insn_access_aux *info)
1213{
1214	if (off < 0 || off >= sizeof(struct pt_regs))
1215		return false;
1216	if (type != BPF_READ)
1217		return false;
1218	if (off % size != 0)
1219		return false;
1220	/*
1221	 * Assertion for 32 bit to make sure last 8 byte access
1222	 * (BPF_DW) to the last 4 byte member is disallowed.
1223	 */
1224	if (off + size > sizeof(struct pt_regs))
1225		return false;
1226
1227	return true;
1228}
1229
1230const struct bpf_verifier_ops kprobe_verifier_ops = {
1231	.get_func_proto  = kprobe_prog_func_proto,
1232	.is_valid_access = kprobe_prog_is_valid_access,
1233};
1234
1235const struct bpf_prog_ops kprobe_prog_ops = {
1236};
1237
1238BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1239	   u64, flags, void *, data, u64, size)
1240{
1241	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1242
1243	/*
1244	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1245	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1246	 * from there and call the same bpf_perf_event_output() helper inline.
1247	 */
1248	return ____bpf_perf_event_output(regs, map, flags, data, size);
1249}
1250
1251static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1252	.func		= bpf_perf_event_output_tp,
1253	.gpl_only	= true,
1254	.ret_type	= RET_INTEGER,
1255	.arg1_type	= ARG_PTR_TO_CTX,
1256	.arg2_type	= ARG_CONST_MAP_PTR,
1257	.arg3_type	= ARG_ANYTHING,
1258	.arg4_type	= ARG_PTR_TO_MEM,
1259	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1260};
1261
1262BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1263	   u64, flags)
1264{
1265	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1266
1267	/*
1268	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1269	 * the other helper's function body cannot be inlined due to being
1270	 * external, thus we need to call raw helper function.
1271	 */
1272	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1273			       flags, 0, 0);
1274}
1275
1276static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1277	.func		= bpf_get_stackid_tp,
1278	.gpl_only	= true,
1279	.ret_type	= RET_INTEGER,
1280	.arg1_type	= ARG_PTR_TO_CTX,
1281	.arg2_type	= ARG_CONST_MAP_PTR,
1282	.arg3_type	= ARG_ANYTHING,
1283};
1284
1285BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1286	   u64, flags)
1287{
1288	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1289
1290	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1291			     (unsigned long) size, flags, 0);
1292}
1293
1294static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1295	.func		= bpf_get_stack_tp,
1296	.gpl_only	= true,
1297	.ret_type	= RET_INTEGER,
1298	.arg1_type	= ARG_PTR_TO_CTX,
1299	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1300	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1301	.arg4_type	= ARG_ANYTHING,
1302};
1303
1304static const struct bpf_func_proto *
1305tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1306{
1307	switch (func_id) {
1308	case BPF_FUNC_perf_event_output:
1309		return &bpf_perf_event_output_proto_tp;
1310	case BPF_FUNC_get_stackid:
1311		return &bpf_get_stackid_proto_tp;
1312	case BPF_FUNC_get_stack:
1313		return &bpf_get_stack_proto_tp;
1314	default:
1315		return bpf_tracing_func_proto(func_id, prog);
1316	}
1317}
1318
1319static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1320				    const struct bpf_prog *prog,
1321				    struct bpf_insn_access_aux *info)
1322{
1323	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1324		return false;
1325	if (type != BPF_READ)
1326		return false;
1327	if (off % size != 0)
1328		return false;
1329
1330	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1331	return true;
1332}
1333
1334const struct bpf_verifier_ops tracepoint_verifier_ops = {
1335	.get_func_proto  = tp_prog_func_proto,
1336	.is_valid_access = tp_prog_is_valid_access,
1337};
1338
1339const struct bpf_prog_ops tracepoint_prog_ops = {
1340};
1341
1342BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1343	   struct bpf_perf_event_value *, buf, u32, size)
1344{
1345	int err = -EINVAL;
1346
1347	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1348		goto clear;
1349	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1350				    &buf->running);
1351	if (unlikely(err))
1352		goto clear;
1353	return 0;
1354clear:
1355	memset(buf, 0, size);
1356	return err;
1357}
1358
1359static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1360	.func		= bpf_perf_prog_read_value,
1361	.gpl_only	= true,
1362	.ret_type	= RET_INTEGER,
1363	.arg1_type	= ARG_PTR_TO_CTX,
1364	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1365	.arg3_type	= ARG_CONST_SIZE,
1366};
1367
1368BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1369	   void *, buf, u32, size, u64, flags)
1370{
1371#ifndef CONFIG_X86
1372	return -ENOENT;
1373#else
1374	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1375	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1376	u32 to_copy;
1377
1378	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1379		return -EINVAL;
1380
1381	if (unlikely(!br_stack))
1382		return -EINVAL;
1383
1384	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1385		return br_stack->nr * br_entry_size;
1386
1387	if (!buf || (size % br_entry_size != 0))
1388		return -EINVAL;
1389
1390	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1391	memcpy(buf, br_stack->entries, to_copy);
1392
1393	return to_copy;
1394#endif
1395}
1396
1397static const struct bpf_func_proto bpf_read_branch_records_proto = {
1398	.func           = bpf_read_branch_records,
1399	.gpl_only       = true,
1400	.ret_type       = RET_INTEGER,
1401	.arg1_type      = ARG_PTR_TO_CTX,
1402	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1403	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1404	.arg4_type      = ARG_ANYTHING,
1405};
1406
1407static const struct bpf_func_proto *
1408pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1409{
1410	switch (func_id) {
1411	case BPF_FUNC_perf_event_output:
1412		return &bpf_perf_event_output_proto_tp;
1413	case BPF_FUNC_get_stackid:
1414		return &bpf_get_stackid_proto_pe;
1415	case BPF_FUNC_get_stack:
1416		return &bpf_get_stack_proto_pe;
1417	case BPF_FUNC_perf_prog_read_value:
1418		return &bpf_perf_prog_read_value_proto;
1419	case BPF_FUNC_read_branch_records:
1420		return &bpf_read_branch_records_proto;
1421	default:
1422		return bpf_tracing_func_proto(func_id, prog);
1423	}
1424}
1425
1426/*
1427 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1428 * to avoid potential recursive reuse issue when/if tracepoints are added
1429 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1430 *
1431 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1432 * in normal, irq, and nmi context.
1433 */
1434struct bpf_raw_tp_regs {
1435	struct pt_regs regs[3];
1436};
1437static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1438static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1439static struct pt_regs *get_bpf_raw_tp_regs(void)
1440{
1441	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1442	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1443
1444	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1445		this_cpu_dec(bpf_raw_tp_nest_level);
1446		return ERR_PTR(-EBUSY);
1447	}
1448
1449	return &tp_regs->regs[nest_level - 1];
1450}
1451
1452static void put_bpf_raw_tp_regs(void)
1453{
1454	this_cpu_dec(bpf_raw_tp_nest_level);
1455}
1456
1457BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1458	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1459{
1460	struct pt_regs *regs = get_bpf_raw_tp_regs();
1461	int ret;
1462
1463	if (IS_ERR(regs))
1464		return PTR_ERR(regs);
1465
1466	perf_fetch_caller_regs(regs);
1467	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1468
1469	put_bpf_raw_tp_regs();
1470	return ret;
1471}
1472
1473static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1474	.func		= bpf_perf_event_output_raw_tp,
1475	.gpl_only	= true,
1476	.ret_type	= RET_INTEGER,
1477	.arg1_type	= ARG_PTR_TO_CTX,
1478	.arg2_type	= ARG_CONST_MAP_PTR,
1479	.arg3_type	= ARG_ANYTHING,
1480	.arg4_type	= ARG_PTR_TO_MEM,
1481	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1482};
1483
1484extern const struct bpf_func_proto bpf_skb_output_proto;
1485extern const struct bpf_func_proto bpf_xdp_output_proto;
 
1486
1487BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1488	   struct bpf_map *, map, u64, flags)
1489{
1490	struct pt_regs *regs = get_bpf_raw_tp_regs();
1491	int ret;
1492
1493	if (IS_ERR(regs))
1494		return PTR_ERR(regs);
1495
1496	perf_fetch_caller_regs(regs);
1497	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1498	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1499			      flags, 0, 0);
1500	put_bpf_raw_tp_regs();
1501	return ret;
1502}
1503
1504static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1505	.func		= bpf_get_stackid_raw_tp,
1506	.gpl_only	= true,
1507	.ret_type	= RET_INTEGER,
1508	.arg1_type	= ARG_PTR_TO_CTX,
1509	.arg2_type	= ARG_CONST_MAP_PTR,
1510	.arg3_type	= ARG_ANYTHING,
1511};
1512
1513BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1514	   void *, buf, u32, size, u64, flags)
1515{
1516	struct pt_regs *regs = get_bpf_raw_tp_regs();
1517	int ret;
1518
1519	if (IS_ERR(regs))
1520		return PTR_ERR(regs);
1521
1522	perf_fetch_caller_regs(regs);
1523	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1524			    (unsigned long) size, flags, 0);
1525	put_bpf_raw_tp_regs();
1526	return ret;
1527}
1528
1529static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1530	.func		= bpf_get_stack_raw_tp,
1531	.gpl_only	= true,
1532	.ret_type	= RET_INTEGER,
1533	.arg1_type	= ARG_PTR_TO_CTX,
1534	.arg2_type	= ARG_PTR_TO_MEM,
1535	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1536	.arg4_type	= ARG_ANYTHING,
1537};
1538
1539static const struct bpf_func_proto *
1540raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1541{
1542	switch (func_id) {
1543	case BPF_FUNC_perf_event_output:
1544		return &bpf_perf_event_output_proto_raw_tp;
1545	case BPF_FUNC_get_stackid:
1546		return &bpf_get_stackid_proto_raw_tp;
1547	case BPF_FUNC_get_stack:
1548		return &bpf_get_stack_proto_raw_tp;
1549	default:
1550		return bpf_tracing_func_proto(func_id, prog);
1551	}
1552}
1553
1554const struct bpf_func_proto *
1555tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1556{
1557	switch (func_id) {
1558#ifdef CONFIG_NET
1559	case BPF_FUNC_skb_output:
1560		return &bpf_skb_output_proto;
1561	case BPF_FUNC_xdp_output:
1562		return &bpf_xdp_output_proto;
1563	case BPF_FUNC_skc_to_tcp6_sock:
1564		return &bpf_skc_to_tcp6_sock_proto;
1565	case BPF_FUNC_skc_to_tcp_sock:
1566		return &bpf_skc_to_tcp_sock_proto;
1567	case BPF_FUNC_skc_to_tcp_timewait_sock:
1568		return &bpf_skc_to_tcp_timewait_sock_proto;
1569	case BPF_FUNC_skc_to_tcp_request_sock:
1570		return &bpf_skc_to_tcp_request_sock_proto;
1571	case BPF_FUNC_skc_to_udp6_sock:
1572		return &bpf_skc_to_udp6_sock_proto;
1573#endif
1574	case BPF_FUNC_seq_printf:
1575		return prog->expected_attach_type == BPF_TRACE_ITER ?
1576		       &bpf_seq_printf_proto :
1577		       NULL;
1578	case BPF_FUNC_seq_write:
1579		return prog->expected_attach_type == BPF_TRACE_ITER ?
1580		       &bpf_seq_write_proto :
1581		       NULL;
1582	default:
1583		return raw_tp_prog_func_proto(func_id, prog);
1584	}
1585}
1586
1587static bool raw_tp_prog_is_valid_access(int off, int size,
1588					enum bpf_access_type type,
1589					const struct bpf_prog *prog,
1590					struct bpf_insn_access_aux *info)
1591{
1592	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1593		return false;
1594	if (type != BPF_READ)
1595		return false;
1596	if (off % size != 0)
1597		return false;
1598	return true;
1599}
1600
1601static bool tracing_prog_is_valid_access(int off, int size,
1602					 enum bpf_access_type type,
1603					 const struct bpf_prog *prog,
1604					 struct bpf_insn_access_aux *info)
1605{
1606	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1607		return false;
1608	if (type != BPF_READ)
1609		return false;
1610	if (off % size != 0)
1611		return false;
1612	return btf_ctx_access(off, size, type, prog, info);
1613}
1614
1615int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1616				     const union bpf_attr *kattr,
1617				     union bpf_attr __user *uattr)
1618{
1619	return -ENOTSUPP;
1620}
1621
1622const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1623	.get_func_proto  = raw_tp_prog_func_proto,
1624	.is_valid_access = raw_tp_prog_is_valid_access,
1625};
1626
1627const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1628};
1629
1630const struct bpf_verifier_ops tracing_verifier_ops = {
1631	.get_func_proto  = tracing_prog_func_proto,
1632	.is_valid_access = tracing_prog_is_valid_access,
1633};
1634
1635const struct bpf_prog_ops tracing_prog_ops = {
1636	.test_run = bpf_prog_test_run_tracing,
1637};
1638
1639static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1640						 enum bpf_access_type type,
1641						 const struct bpf_prog *prog,
1642						 struct bpf_insn_access_aux *info)
1643{
1644	if (off == 0) {
1645		if (size != sizeof(u64) || type != BPF_READ)
1646			return false;
1647		info->reg_type = PTR_TO_TP_BUFFER;
1648	}
1649	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1650}
1651
1652const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1653	.get_func_proto  = raw_tp_prog_func_proto,
1654	.is_valid_access = raw_tp_writable_prog_is_valid_access,
1655};
1656
1657const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1658};
1659
1660static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1661				    const struct bpf_prog *prog,
1662				    struct bpf_insn_access_aux *info)
1663{
1664	const int size_u64 = sizeof(u64);
1665
1666	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1667		return false;
1668	if (type != BPF_READ)
1669		return false;
1670	if (off % size != 0) {
1671		if (sizeof(unsigned long) != 4)
1672			return false;
1673		if (size != 8)
1674			return false;
1675		if (off % size != 4)
1676			return false;
1677	}
1678
1679	switch (off) {
1680	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1681		bpf_ctx_record_field_size(info, size_u64);
1682		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1683			return false;
1684		break;
1685	case bpf_ctx_range(struct bpf_perf_event_data, addr):
1686		bpf_ctx_record_field_size(info, size_u64);
1687		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1688			return false;
1689		break;
1690	default:
1691		if (size != sizeof(long))
1692			return false;
1693	}
1694
1695	return true;
1696}
1697
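/*
 * Rewrite loads from struct bpf_perf_event_data into loads from the kernel
 * side struct bpf_perf_event_data_kern: sample_period and addr become two
 * loads (ctx->data, then data->period or data->addr), and any other offset
 * is treated as a field of the saved pt_regs reached through ctx->regs.
 */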
1698static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1699				      const struct bpf_insn *si,
1700				      struct bpf_insn *insn_buf,
1701				      struct bpf_prog *prog, u32 *target_size)
1702{
1703	struct bpf_insn *insn = insn_buf;
1704
1705	switch (si->off) {
1706	case offsetof(struct bpf_perf_event_data, sample_period):
1707		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1708						       data), si->dst_reg, si->src_reg,
1709				      offsetof(struct bpf_perf_event_data_kern, data));
1710		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1711				      bpf_target_off(struct perf_sample_data, period, 8,
1712						     target_size));
1713		break;
1714	case offsetof(struct bpf_perf_event_data, addr):
1715		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1716						       data), si->dst_reg, si->src_reg,
1717				      offsetof(struct bpf_perf_event_data_kern, data));
1718		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1719				      bpf_target_off(struct perf_sample_data, addr, 8,
1720						     target_size));
1721		break;
1722	default:
1723		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1724						       regs), si->dst_reg, si->src_reg,
1725				      offsetof(struct bpf_perf_event_data_kern, regs));
1726		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1727				      si->off);
1728		break;
1729	}
1730
1731	return insn - insn_buf;
1732}
1733
1734const struct bpf_verifier_ops perf_event_verifier_ops = {
1735	.get_func_proto		= pe_prog_func_proto,
1736	.is_valid_access	= pe_prog_is_valid_access,
1737	.convert_ctx_access	= pe_prog_convert_ctx_access,
1738};
1739
1740const struct bpf_prog_ops perf_event_prog_ops = {
1741};
1742
1743static DEFINE_MUTEX(bpf_event_mutex);
1744
1745#define BPF_TRACE_MAX_PROGS 64
1746
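/*
 * The prog_array attached to a trace event is treated as immutable once
 * published: attach and detach build a modified copy under bpf_event_mutex
 * and then swap it in with rcu_assign_pointer(), so readers running the
 * array never see it change underneath them.
 */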
1747int perf_event_attach_bpf_prog(struct perf_event *event,
1748			       struct bpf_prog *prog)
 
1749{
1750	struct bpf_prog_array *old_array;
1751	struct bpf_prog_array *new_array;
1752	int ret = -EEXIST;
1753
1754	/*
1755	 * Kprobe override only works if they are on the function entry,
1756	 * and only if they are on the opt-in list.
1757	 */
1758	if (prog->kprobe_override &&
1759	    (!trace_kprobe_on_func_entry(event->tp_event) ||
1760	     !trace_kprobe_error_injectable(event->tp_event)))
1761		return -EINVAL;
1762
1763	mutex_lock(&bpf_event_mutex);
1764
1765	if (event->prog)
1766		goto unlock;
1767
1768	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1769	if (old_array &&
1770	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1771		ret = -E2BIG;
1772		goto unlock;
1773	}
1774
1775	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1776	if (ret < 0)
1777		goto unlock;
1778
1779	/* set the new array to event->tp_event and set event->prog */
1780	event->prog = prog;
 
1781	rcu_assign_pointer(event->tp_event->prog_array, new_array);
1782	bpf_prog_array_free(old_array);
1783
1784unlock:
1785	mutex_unlock(&bpf_event_mutex);
1786	return ret;
1787}
1788
1789void perf_event_detach_bpf_prog(struct perf_event *event)
1790{
1791	struct bpf_prog_array *old_array;
1792	struct bpf_prog_array *new_array;
1793	int ret;
1794
1795	mutex_lock(&bpf_event_mutex);
1796
1797	if (!event->prog)
1798		goto unlock;
1799
1800	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1801	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1802	if (ret == -ENOENT)
1803		goto unlock;
 
1804	if (ret < 0) {
1805		bpf_prog_array_delete_safe(old_array, event->prog);
1806	} else {
1807		rcu_assign_pointer(event->tp_event->prog_array, new_array);
1808		bpf_prog_array_free(old_array);
1809	}
1810
1811	bpf_prog_put(event->prog);
1812	event->prog = NULL;
1813
1814unlock:
1815	mutex_unlock(&bpf_event_mutex);
1816}
1817
1818int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1819{
1820	struct perf_event_query_bpf __user *uquery = info;
1821	struct perf_event_query_bpf query = {};
1822	struct bpf_prog_array *progs;
1823	u32 *ids, prog_cnt, ids_len;
1824	int ret;
1825
1826	if (!perfmon_capable())
1827		return -EPERM;
1828	if (event->attr.type != PERF_TYPE_TRACEPOINT)
1829		return -EINVAL;
1830	if (copy_from_user(&query, uquery, sizeof(query)))
1831		return -EFAULT;
1832
1833	ids_len = query.ids_len;
1834	if (ids_len > BPF_TRACE_MAX_PROGS)
1835		return -E2BIG;
1836	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1837	if (!ids)
1838		return -ENOMEM;
1839	/*
1840	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1841	 * is required when user only wants to check for uquery->prog_cnt.
1842	 * There is no need to check for it since the case is handled
1843	 * gracefully in bpf_prog_array_copy_info.
1844	 */
1845
1846	mutex_lock(&bpf_event_mutex);
1847	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1848	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1849	mutex_unlock(&bpf_event_mutex);
1850
1851	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1852	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1853		ret = -EFAULT;
1854
1855	kfree(ids);
1856	return ret;
1857}
1858
1859extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1860extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1861
1862struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1863{
1864	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1865
1866	for (; btp < __stop__bpf_raw_tp; btp++) {
1867		if (!strcmp(btp->tp->name, name))
1868			return btp;
1869	}
1870
1871	return bpf_get_raw_tracepoint_module(name);
1872}
1873
1874void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1875{
1876	struct module *mod = __module_address((unsigned long)btp);
1877
1878	if (mod)
1879		module_put(mod);
1880}
1881
1882static __always_inline
1883void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1884{
1885	cant_sleep();
1886	rcu_read_lock();
1887	(void) BPF_PROG_RUN(prog, args);
1888	rcu_read_unlock();
1889}
1890
1891#define UNPACK(...)			__VA_ARGS__
1892#define REPEAT_1(FN, DL, X, ...)	FN(X)
1893#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1894#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1895#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1896#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1897#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1898#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1899#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1900#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1901#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1902#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1903#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1904#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
1905
1906#define SARG(X)		u64 arg##X
1907#define COPY(X)		args[X] = arg##X
1908
1909#define __DL_COM	(,)
1910#define __DL_SEM	(;)
1911
1912#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1913
1914#define BPF_TRACE_DEFN_x(x)						\
1915	void bpf_trace_run##x(struct bpf_prog *prog,			\
1916			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
1917	{								\
1918		u64 args[x];						\
1919		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
1920		__bpf_trace_run(prog, args);				\
1921	}								\
1922	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
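/*
 * For example, BPF_TRACE_DEFN_x(2) expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */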
1923BPF_TRACE_DEFN_x(1);
1924BPF_TRACE_DEFN_x(2);
1925BPF_TRACE_DEFN_x(3);
1926BPF_TRACE_DEFN_x(4);
1927BPF_TRACE_DEFN_x(5);
1928BPF_TRACE_DEFN_x(6);
1929BPF_TRACE_DEFN_x(7);
1930BPF_TRACE_DEFN_x(8);
1931BPF_TRACE_DEFN_x(9);
1932BPF_TRACE_DEFN_x(10);
1933BPF_TRACE_DEFN_x(11);
1934BPF_TRACE_DEFN_x(12);
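/*
 * For illustration, BPF_TRACE_DEFN_x(2) expands roughly to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */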
1935
1936static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1937{
1938	struct tracepoint *tp = btp->tp;
1939
1940	/*
1941	 * Check that the program does not access arguments beyond what is
1942	 * available in this tracepoint's context.
1943	 */
1944	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1945		return -EINVAL;
1946
1947	if (prog->aux->max_tp_access > btp->writable_size)
1948		return -EINVAL;
1949
1950	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1951}
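/*
 * Example of the checks above: a raw tracepoint with num_args == 3 exposes
 * 3 * sizeof(u64) == 24 bytes of context, so a program whose verifier-recorded
 * max_ctx_offset is, say, 32 is rejected with -EINVAL; a program whose
 * max_tp_access exceeds the tracepoint's writable_size is rejected the same
 * way.
 */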
1952
1953int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1954{
1955	return __bpf_probe_register(btp, prog);
1956}
1957
1958int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1959{
1960	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1961}
1962
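/*
 * Resolve a perf event that has a BPF program attached into the information
 * reported back to userspace (for example via the BPF_TASK_FD_QUERY bpf()
 * command): the program id, an fd_type such as BPF_FD_TYPE_TRACEPOINT, the
 * tracepoint/probe name in *buf and, for kprobes and uprobes, the probe
 * offset and address.
 */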
1963int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1964			    u32 *fd_type, const char **buf,
1965			    u64 *probe_offset, u64 *probe_addr)
1966{
1967	bool is_tracepoint, is_syscall_tp;
1968	struct bpf_prog *prog;
1969	int flags, err = 0;
1970
1971	prog = event->prog;
1972	if (!prog)
1973		return -ENOENT;
1974
1975	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1976	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1977		return -EOPNOTSUPP;
1978
1979	*prog_id = prog->aux->id;
1980	flags = event->tp_event->flags;
1981	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1982	is_syscall_tp = is_syscall_trace_event(event->tp_event);
1983
1984	if (is_tracepoint || is_syscall_tp) {
1985		*buf = is_tracepoint ? event->tp_event->tp->name
1986				     : event->tp_event->name;
1987		*fd_type = BPF_FD_TYPE_TRACEPOINT;
1988		*probe_offset = 0x0;
1989		*probe_addr = 0x0;
1990	} else {
1991		/* kprobe/uprobe */
1992		err = -EOPNOTSUPP;
1993#ifdef CONFIG_KPROBE_EVENTS
1994		if (flags & TRACE_EVENT_FL_KPROBE)
1995			err = bpf_get_kprobe_info(event, fd_type, buf,
1996						  probe_offset, probe_addr,
1997						  event->attr.type == PERF_TYPE_TRACEPOINT);
1998#endif
1999#ifdef CONFIG_UPROBE_EVENTS
2000		if (flags & TRACE_EVENT_FL_UPROBE)
2001			err = bpf_get_uprobe_info(event, fd_type, buf,
2002						  probe_offset,
2003						  event->attr.type == PERF_TYPE_TRACEPOINT);
2004#endif
2005	}
2006
2007	return err;
2008}
2009
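/*
 * Initialize the per-CPU irq_work entries whose callback,
 * do_bpf_send_signal(), lets the bpf_send_signal*() helpers defer signal
 * delivery when they are invoked from a context (such as NMI) that cannot
 * deliver a signal directly.
 */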
2010static int __init send_signal_irq_work_init(void)
2011{
2012	int cpu;
2013	struct send_signal_irq_work *work;
2014
2015	for_each_possible_cpu(cpu) {
2016		work = per_cpu_ptr(&send_signal_work, cpu);
2017		init_irq_work(&work->irq_work, do_bpf_send_signal);
2018	}
2019	return 0;
2020}
2021
2022subsys_initcall(send_signal_irq_work_init);
2023
2024#ifdef CONFIG_MODULES
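/*
 * Keep bpf_trace_modules in sync with module load/unload so that
 * bpf_get_raw_tracepoint() can fall back to raw tracepoints provided by
 * modules: an entry is added when a module with bpf_raw_events comes up
 * and removed when it goes away.
 */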
2025static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2026			    void *module)
2027{
2028	struct bpf_trace_module *btm, *tmp;
2029	struct module *mod = module;
2030
2031	if (mod->num_bpf_raw_events == 0 ||
2032	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2033		return 0;
2034
2035	mutex_lock(&bpf_module_mutex);
2036
2037	switch (op) {
2038	case MODULE_STATE_COMING:
2039		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2040		if (btm) {
2041			btm->module = module;
2042			list_add(&btm->list, &bpf_trace_modules);
2043		}
2044		break;
2045	case MODULE_STATE_GOING:
2046		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2047			if (btm->module == module) {
2048				list_del(&btm->list);
2049				kfree(btm);
2050				break;
2051			}
2052		}
2053		break;
2054	}
2055
2056	mutex_unlock(&bpf_module_mutex);
2057
2058	return 0;
2059}
2060
2061static struct notifier_block bpf_module_nb = {
2062	.notifier_call = bpf_event_notify,
2063};
2064
2065static int __init bpf_event_init(void)
2066{
2067	register_module_notifier(&bpf_module_nb);
2068	return 0;
2069}
2070
2071fs_initcall(bpf_event_init);
2072#endif /* CONFIG_MODULES */